Subversion Repositories DIN Is Noise

Rev

Rev 2096 | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2183 jag 1
/************************************************************************/
2
/*! \class RtAudio
3
    \brief Realtime audio i/o C++ classes.
4
 
5
    RtAudio provides a common API (Application Programming Interface)
6
    for realtime audio input/output across Linux (native ALSA, Jack,
7
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8
    (DirectSound, ASIO and WASAPI) operating systems.
9
 
10
    RtAudio GitHub site: https://github.com/thestk/rtaudio
11
    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12
 
13
    RtAudio: realtime audio i/o C++ classes
14
    Copyright (c) 2001-2021 Gary P. Scavone
15
 
16
    Permission is hereby granted, free of charge, to any person
17
    obtaining a copy of this software and associated documentation files
18
    (the "Software"), to deal in the Software without restriction,
19
    including without limitation the rights to use, copy, modify, merge,
20
    publish, distribute, sublicense, and/or sell copies of the Software,
21
    and to permit persons to whom the Software is furnished to do so,
22
    subject to the following conditions:
23
 
24
    The above copyright notice and this permission notice shall be
25
    included in all copies or substantial portions of the Software.
26
 
27
    Any person wishing to distribute modifications to the Software is
28
    asked to send the modifications to the original developer so that
29
    they can be incorporated into the canonical version.  This is,
30
    however, not a binding provision of this license.
31
 
32
    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39
*/
40
/************************************************************************/
41
 
42
// RtAudio: Version 5.2.0
43
 
44
#include "RtAudio.h"

#include <algorithm>
#include <climits>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <vector>
51
 
52
// Static variable definitions.
53
// Sample rates (in Hz) probed when a device reports a continuous range
// rather than a discrete list of supported rates.
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
};
// Derive the count from the array itself (previously hard-coded as 14)
// so the two definitions can never drift out of sync.
const unsigned int RtApi::MAX_SAMPLE_RATES =
  sizeof( RtApi::SAMPLE_RATES ) / sizeof( RtApi::SAMPLE_RATES[0] );
58
 
59
#if defined(_WIN32) || defined(__CYGWIN__)
  // Windows critical-section wrappers for the stream mutex.
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  #include "tchar.h"

  // Convert a null-terminated char or wchar_t string to a UTF-8
  // std::string (used for Windows device names).
  template<typename T> inline
  std::string convertCharPointerToStdString(const T *text);

  template<> inline
  std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  template<> inline
  std::string convertCharPointerToStdString(const wchar_t *text)
  {
    // First call computes the required buffer size, including the
    // terminating null.  A return of zero means the conversion failed;
    // previously length-1 would underflow to a huge std::string size.
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    if ( length <= 0 ) return std::string();
    std::string s( length-1, '\0' );  // exclude the terminating null
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__unix__) || defined(__APPLE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#endif
92
 
93
// *************************************************** //
94
//
95
// RtAudio definitions.
96
//
97
// *************************************************** //
98
 
99
std::string RtAudio :: getVersion( void )
100
{
101
  return RTAUDIO_VERSION;
102
}
103
 
104
// Define API names and display names.
105
// Must be in same order as API enum.
106
extern "C" {
// Each row maps an RtAudio::Api enum value to { identifier, display name }.
// The table is indexed directly by the Api enum, so row order must match
// the enum declaration order exactly.
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa"        , "ALSA" },
  { "pulse"       , "Pulse" },
  { "oss"         , "OpenSoundSystem" },
  { "jack"        , "Jack" },
  { "core"        , "CoreAudio" },
  { "wasapi"      , "WASAPI" },
  { "asio"        , "ASIO" },
  { "ds"          , "DirectSound" },
  { "dummy"       , "Dummy" },
};
// Row count of the table above; a compile-time check below verifies it
// equals RtAudio::NUM_APIS.
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);

// The order here will control the order of RtAudio's API search in
// the constructor.
extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
#if defined(__UNIX_JACK__)
  RtAudio::UNIX_JACK,
#endif
#if defined(__LINUX_PULSE__)
  RtAudio::LINUX_PULSE,
#endif
#if defined(__LINUX_ALSA__)
  RtAudio::LINUX_ALSA,
#endif
#if defined(__LINUX_OSS__)
  RtAudio::LINUX_OSS,
#endif
#if defined(__WINDOWS_ASIO__)
  RtAudio::WINDOWS_ASIO,
#endif
#if defined(__WINDOWS_WASAPI__)
  RtAudio::WINDOWS_WASAPI,
#endif
#if defined(__WINDOWS_DS__)
  RtAudio::WINDOWS_DS,
#endif
#if defined(__MACOSX_CORE__)
  RtAudio::MACOSX_CORE,
#endif
#if defined(__RTAUDIO_DUMMY__)
  RtAudio::RTAUDIO_DUMMY,
#endif
  RtAudio::UNSPECIFIED,   // sentinel terminator; excluded from the count below
};
// Number of compiled-in APIs, excluding the trailing UNSPECIFIED sentinel
// (hence the -1).
extern "C" const unsigned int rtaudio_num_compiled_apis =
  sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
}
157
 
158
// This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
159
// If the build breaks here, check that they match.
160
// Pre-C++11 static assertion: StaticAssert<false> has only a private
// default constructor, so instantiating it is a compile error, while
// StaticAssert<true> is publicly constructible.
template<bool b> class StaticAssert { private: StaticAssert() {} };
template<> class StaticAssert<true>{ public: StaticAssert() {} };
// This never-instantiated class exists solely to force the compile-time
// check that the name table size matches the Api enum count.
class StaticAssertions { StaticAssertions() {
  StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
}};
165
 
166
// Fill 'apis' with the list of APIs compiled into this build, in the
// constructor's search order (the UNSPECIFIED sentinel is excluded).
void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
{
  apis.assign( rtaudio_compiled_apis,
               rtaudio_compiled_apis + rtaudio_num_compiled_apis );
}
171
 
172
std::string RtAudio :: getApiName( RtAudio::Api api )
173
{
174
  if (api < 0 || api >= RtAudio::NUM_APIS)
175
    return "";
176
  return rtaudio_api_names[api][0];
177
}
178
 
179
std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
180
{
181
  if (api < 0 || api >= RtAudio::NUM_APIS)
182
    return "Unknown";
183
  return rtaudio_api_names[api][1];
184
}
185
 
186
// Look up a compiled-in API by its short identifier (e.g. "jack").
// Returns UNSPECIFIED when the name is unknown or not compiled in.
RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
{
  for ( unsigned int idx = 0; idx < rtaudio_num_compiled_apis; ++idx ) {
    RtAudio::Api candidate = rtaudio_compiled_apis[idx];
    if ( name == rtaudio_api_names[candidate][0] )
      return candidate;
  }
  return RtAudio::UNSPECIFIED;
}
194
 
195
// Instantiate the concrete RtApi subclass for 'api', replacing any
// previously held instance.  If 'api' was not compiled in, rtapi_ is
// left null; the caller checks for that.
void RtAudio :: openRtApi( RtAudio::Api api )
{
  if ( rtapi_ )
    delete rtapi_;
  rtapi_ = 0;

  // Only one branch can match; each is compiled in only when its
  // preprocessor symbol is defined by the build.
#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#endif
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#endif
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#endif
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#endif
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#endif
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#endif
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#endif
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#endif
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
#endif
}
238
 
239
// Construct an RtAudio instance.  If 'api' names a compiled-in API it is
// used directly; otherwise the compiled APIs are tried in search order
// and the first one reporting at least one device wins.
RtAudio :: RtAudio( RtAudio::Api api )
{
  rtapi_ = 0;

  if ( api != UNSPECIFIED ) {
    // Attempt to open the specified API.
    openRtApi( api );
    if ( rtapi_ ) return;

    // No compiled support for specified API value.  Issue a debug
    // warning and continue as if no API was specified.
    std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
  }

  // Iterate through the compiled APIs and return as soon as we find
  // one with at least one device or we reach the end of the list.
  // Note: if the last API tried has no devices, rtapi_ still holds that
  // instance and is used as the fallback.
  std::vector< RtAudio::Api > apis;
  getCompiledApi( apis );
  for ( unsigned int i=0; i<apis.size(); i++ ) {
    openRtApi( apis[i] );
    if ( rtapi_ && rtapi_->getDeviceCount() ) break;
  }

  if ( rtapi_ ) return;

  // It should not be possible to get here because the preprocessor
  // definition __RTAUDIO_DUMMY__ is automatically defined if no
  // API-specific definitions are passed to the compiler. But just in
  // case something weird happens, we'll throw an error.
  std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
  throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
}
271
 
272
// Destroy the concrete API instance, if any.
RtAudio :: ~RtAudio()
{
  // delete on a null pointer is a no-op, so no guard is needed.
  delete rtapi_;
}
277
 
278
void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
279
                            RtAudio::StreamParameters *inputParameters,
280
                            RtAudioFormat format, unsigned int sampleRate,
281
                            unsigned int *bufferFrames,
282
                            RtAudioCallback callback, void *userData,
283
                            RtAudio::StreamOptions *options,
284
                            RtAudioErrorCallback errorCallback )
285
{
286
  return rtapi_->openStream( outputParameters, inputParameters, format,
287
                             sampleRate, bufferFrames, callback,
288
                             userData, options, errorCallback );
289
}
290
 
291
// *************************************************** //
292
//
293
// Public RtApi definitions (see end of file for
294
// private or protected utility functions).
295
//
296
// *************************************************** //
297
 
298
// Base-class constructor: start with no open stream, empty conversion
// buffers, and default diagnostic settings.
RtApi :: RtApi()
{
  showWarnings_ = true;
  firstErrorOccurred_ = false;

  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  stream_.userBuffer[0] = 0;
  stream_.userBuffer[1] = 0;

  MUTEX_INITIALIZE( &stream_.mutex );
}
309
 
310
// Release the stream mutex created in the constructor.
RtApi :: ~RtApi()
{
  MUTEX_DESTROY( &stream_.mutex );
}
314
 
315
// Validate the caller's parameters, probe/open the requested output
// and/or input device(s) via the subclass's probeDeviceOpen(), and on
// success record the callback info and mark the stream STOPPED.
// Failures are reported through error() and leave the stream closed.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
{
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Clear stream information potentially left from a previously open stream.
  clearStreamInfo();

  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // formatBytes() returns 0 for unrecognized RtAudioFormat values.
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Range-check the requested device indices against the current count.
  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
  if ( oParams ) {
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  unsigned int iChannels = 0;
  if ( iParams ) {
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  bool result;

  // Open the output side first, then the input side.
  if ( oChannels > 0 ) {

    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  if ( iChannels > 0 ) {

    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      // Undo the already-opened output side before reporting failure.
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  // Stash the user callback/data as opaque pointers for the API thread.
  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  // Report back the actual buffer count chosen by the API.
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
}
407
 
408
unsigned int RtApi :: getDefaultInputDevice( void )
409
{
410
  // Should be reimplemented in subclasses if necessary.
411
  unsigned int nDevices = getDeviceCount();
412
  for ( unsigned int i = 0; i < nDevices; i++ ) {
413
    if ( getDeviceInfo( i ).isDefaultInput ) {
414
      return i;
415
    }
416
  }
417
 
418
  return 0;
419
}
420
 
421
unsigned int RtApi :: getDefaultOutputDevice( void )
422
{
423
  // Should be reimplemented in subclasses if necessary.
424
  unsigned int nDevices = getDeviceCount();
425
  for ( unsigned int i = 0; i < nDevices; i++ ) {
426
    if ( getDeviceInfo( i ).isDefaultOutput ) {
427
      return i;
428
    }
429
  }
430
 
431
  return 0;
432
}
433
 
434
// Base-class placeholder for closing a stream; does nothing here.
void RtApi :: closeStream( void )
{
  // MUST be implemented in subclasses!
  return;
}
439
 
440
// Base-class placeholder for device probing/opening; always fails here.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
{
  // MUST be implemented in subclasses!
  return FAILURE;
}
448
 
449
// Advance the stream clock by one buffer's duration (in seconds).
void RtApi :: tickStreamTime( void )
{
  // Subclasses that do not provide their own implementation of
  // getStreamTime should call this function once per buffer I/O to
  // provide basic stream time support.

  stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );

#if defined( HAVE_GETTIMEOFDAY )
  // Remember when this tick happened so getStreamTime() can interpolate
  // between buffer callbacks.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif
}
461
 
462
long RtApi :: getStreamLatency( void )
463
{
464
  verifyStream();
465
 
466
  long totalLatency = 0;
467
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
468
    totalLatency = stream_.latency[0];
469
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
470
    totalLatency += stream_.latency[1];
471
 
472
  return totalLatency;
473
}
474
 
475
// Return the elapsed stream time in seconds.  When gettimeofday() is
// available, the per-buffer stream clock is refined with the wall-clock
// time elapsed since the last tick.
double RtApi :: getStreamTime( void )
{
  verifyStream();

#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.
  struct timeval then;
  struct timeval now;

  // Only interpolate while running and after at least one tick.
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;

  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
#else
  return stream_.streamTime;
#endif
}
497
 
498
// Reset the stream clock to 'time' seconds (negative values ignored).
void RtApi :: setStreamTime( double time )
{
  verifyStream();

  if ( time >= 0.0 )
    stream_.streamTime = time;
#if defined( HAVE_GETTIMEOFDAY )
  // Restart interpolation from this moment.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif
}
508
 
509
unsigned int RtApi :: getStreamSampleRate( void )
510
{
511
 verifyStream();
512
 
513
 return stream_.sampleRate;
514
}
515
 
516
 
517
// *************************************************** //
518
//
519
// OS/API-specific methods.
520
//
521
// *************************************************** //
522
 
523
#if defined(__MACOSX_CORE__)
524
 
525
#include <unistd.h>
526
 
527
// The OS X CoreAudio API is designed to use a separate callback
528
// procedure for each of its audio devices.  A single RtAudio duplex
529
// stream using two different devices is supported here, though it
530
// cannot be guaranteed to always behave correctly because we cannot
531
// synchronize these two callbacks.
532
//
533
// A property listener is installed for over/underrun information.
534
// However, no functionality is currently provided to allow property
535
// listeners to trigger user handlers because it is unclear what could
536
// be done if a critical stream parameter (buffer size, sample rate,
537
// device disconnect) notification arrived.  The listeners entail
538
// quite a bit of extra code and most likely, a user program wouldn't
539
// be prepared for the result anyway.  However, we do provide a flag
540
// to the client callback function to inform of an over/underrun.
541
 
542
// A structure to hold various information related to the CoreAudio API
543
// implementation.
544
// A structure to hold various information related to the CoreAudio API
// implementation.  Index [0] refers to the output device, [1] to the
// input device throughout.
struct CoreHandle {
  AudioDeviceID id[2];    // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2];  // IOProc handles (10.5+ registration API)
#endif
  UInt32 iStream[2];      // device stream index (or first if using multiple)
  UInt32 nStreams[2];     // number of streams to use
  bool xrun[2];           // over/underrun flags set by the xrun listener
  char *deviceBuffer;     // intermediate buffer for format/channel conversion
  pthread_cond_t condition;  // signals stream stop/drain completion
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.

  CoreHandle()
    :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
560
 
561
// CoreAudio API constructor: configures the HAL run loop on 10.6+.
RtApiCore:: RtApiCore()
{
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6.  If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.
  CFRunLoopRef theRunLoop = NULL;  // NULL run loop => use the HAL's own thread
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
  }
#endif
}
579
 
580
RtApiCore :: ~RtApiCore()
{
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
587
 
588
// Return the number of CoreAudio devices, derived from the byte size of
// the system device-list property; 0 (with a warning) on failure.
unsigned int RtApiCore :: getDeviceCount( void )
{
  // Find out how many audio devices there are, if any.
  UInt32 dataSize;
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );
    return 0;
  }

  return dataSize / sizeof( AudioDeviceID );
}
602
 
603
unsigned int RtApiCore :: getDefaultInputDevice( void )
604
{
605
  unsigned int nDevices = getDeviceCount();
606
  if ( nDevices <= 1 ) return 0;
607
 
608
  AudioDeviceID id;
609
  UInt32 dataSize = sizeof( AudioDeviceID );
610
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
611
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
612
  if ( result != noErr ) {
613
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
614
    error( RtAudioError::WARNING );
615
    return 0;
616
  }
617
 
618
  dataSize *= nDevices;
619
  AudioDeviceID deviceList[ nDevices ];
620
  property.mSelector = kAudioHardwarePropertyDevices;
621
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
622
  if ( result != noErr ) {
623
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
624
    error( RtAudioError::WARNING );
625
    return 0;
626
  }
627
 
628
  for ( unsigned int i=0; i<nDevices; i++ )
629
    if ( id == deviceList[i] ) return i;
630
 
631
  errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
632
  error( RtAudioError::WARNING );
633
  return 0;
634
}
635
 
636
unsigned int RtApiCore :: getDefaultOutputDevice( void )
637
{
638
  unsigned int nDevices = getDeviceCount();
639
  if ( nDevices <= 1 ) return 0;
640
 
641
  AudioDeviceID id;
642
  UInt32 dataSize = sizeof( AudioDeviceID );
643
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
644
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
645
  if ( result != noErr ) {
646
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
647
    error( RtAudioError::WARNING );
648
    return 0;
649
  }
650
 
651
  dataSize = sizeof( AudioDeviceID ) * nDevices;
652
  AudioDeviceID deviceList[ nDevices ];
653
  property.mSelector = kAudioHardwarePropertyDevices;
654
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
655
  if ( result != noErr ) {
656
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
657
    error( RtAudioError::WARNING );
658
    return 0;
659
  }
660
 
661
  for ( unsigned int i=0; i<nDevices; i++ )
662
    if ( id == deviceList[i] ) return i;
663
 
664
  errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
665
  error( RtAudioError::WARNING );
666
  return 0;
667
}
668
 
669
// Probe a CoreAudio device: name (manufacturer + device), channel counts
// for output/input/duplex, supported sample rates, preferred rate, and
// native format.  On any failure a partially filled DeviceInfo with
// probed == false is returned after issuing a warning.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // NOTE(review): variable-length arrays (here and for rangeList below)
  // are a non-standard C++ extension.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return info;
  }

  AudioDeviceID id = deviceList[ device ];

  // Get the device name, formed as "<manufacturer>: <device name>".
  info.name.erase();
  CFStringRef cfname;
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // length*3+1 bytes: worst-case UTF-8 expansion for the CFString's
  // UTF-16 code units, plus the terminating null.
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );
  CFRelease( cfname );
  free(mname);

  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)name, strlen(name) );
  CFRelease( cfname );
  free(name);

  // Get the output stream "configuration".
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  dataSize = 0;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get output channel information: sum the channels of all streams.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // Get the input stream "configuration".
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe the device sample rates.
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;

  // Determine the supported sample rates.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // The sample rate reporting mechanism is a bit of a mystery.  It
  // seems that it can either return individual rates or a range of
  // rates.  I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
      unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
      info.sampleRates.push_back( tmpSr );

      // Prefer the highest rate that does not exceed 48 kHz.
      if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
        info.preferredSampleRate = tmpSr;

    } else {
      haveValueRange = true;
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
    }
  }

  // For a continuous range, offer every standard rate that falls inside
  // the most conservative [minimumRate, maximumRate] interval.
  if ( haveValueRange ) {
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );

        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];
      }
    }
  }

  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  info.probed = true;
  return info;
}
909
 
910
static OSStatus callbackHandler( AudioDeviceID inDevice,
911
                                 const AudioTimeStamp* /*inNow*/,
912
                                 const AudioBufferList* inInputData,
913
                                 const AudioTimeStamp* /*inInputTime*/,
914
                                 AudioBufferList* outOutputData,
915
                                 const AudioTimeStamp* /*inOutputTime*/,
916
                                 void* infoPointer )
917
{
918
  CallbackInfo *info = (CallbackInfo *) infoPointer;
919
 
920
  RtApiCore *object = (RtApiCore *) info->object;
921
  if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
922
    return kAudioHardwareUnspecifiedError;
923
  else
924
    return kAudioHardwareNoError;
925
}
static OSStatus xrunListener( AudioObjectID /*inDevice*/,
928
                              UInt32 nAddresses,
929
                              const AudioObjectPropertyAddress properties[],
930
                              void* handlePointer )
931
{
932
  CoreHandle *handle = (CoreHandle *) handlePointer;
933
  for ( UInt32 i=0; i<nAddresses; i++ ) {
934
    if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
935
      if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
936
        handle->xrun[1] = true;
937
      else
938
        handle->xrun[0] = true;
939
    }
940
  }
941
 
942
  return kAudioHardwareNoError;
943
}
static OSStatus rateListener( AudioObjectID inDevice,
946
                              UInt32 /*nAddresses*/,
947
                              const AudioObjectPropertyAddress /*properties*/[],
948
                              void* ratePointer )
949
{
950
  Float64 *rate = (Float64 *) ratePointer;
951
  UInt32 dataSize = sizeof( Float64 );
952
  AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
953
                                          kAudioObjectPropertyScopeGlobal,
954
                                          kAudioObjectPropertyElementMaster };
955
  AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
956
  return kAudioHardwareNoError;
957
}
bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
960
                                   unsigned int firstChannel, unsigned int sampleRate,
961
                                   RtAudioFormat format, unsigned int *bufferSize,
962
                                   RtAudio::StreamOptions *options )
963
{
964
  // Get device ID
965
  unsigned int nDevices = getDeviceCount();
966
  if ( nDevices == 0 ) {
967
    // This should not happen because a check is made before this function is called.
968
    errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
969
    return FAILURE;
970
  }
971
 
972
  if ( device >= nDevices ) {
973
    // This should not happen because a check is made before this function is called.
974
    errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
975
    return FAILURE;
976
  }
977
 
978
  AudioDeviceID deviceList[ nDevices ];
979
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
980
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
981
                                          kAudioObjectPropertyScopeGlobal,
982
                                          kAudioObjectPropertyElementMaster };
983
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
984
                                                0, NULL, &dataSize, (void *) &deviceList );
985
  if ( result != noErr ) {
986
    errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
987
    return FAILURE;
988
  }
989
 
990
  AudioDeviceID id = deviceList[ device ];
991
 
992
  // Setup for stream mode.
993
  bool isInput = false;
994
  if ( mode == INPUT ) {
995
    isInput = true;
996
    property.mScope = kAudioDevicePropertyScopeInput;
997
  }
998
  else
999
    property.mScope = kAudioDevicePropertyScopeOutput;
1000
 
1001
  // Get the stream "configuration".
1002
  AudioBufferList *bufferList = nil;
1003
  dataSize = 0;
1004
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
1005
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
1006
  if ( result != noErr || dataSize == 0 ) {
1007
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
1008
    errorText_ = errorStream_.str();
1009
    return FAILURE;
1010
  }
1011
 
1012
  // Allocate the AudioBufferList.
1013
  bufferList = (AudioBufferList *) malloc( dataSize );
1014
  if ( bufferList == NULL ) {
1015
    errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1016
    return FAILURE;
1017
  }
1018
 
1019
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1020
  if (result != noErr || dataSize == 0) {
1021
    free( bufferList );
1022
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1023
    errorText_ = errorStream_.str();
1024
    return FAILURE;
1025
  }
1026
 
1027
  // Search for one or more streams that contain the desired number of
1028
  // channels. CoreAudio devices can have an arbitrary number of
1029
  // streams and each stream can have an arbitrary number of channels.
1030
  // For each stream, a single buffer of interleaved samples is
1031
  // provided.  RtAudio prefers the use of one stream of interleaved
1032
  // data or multiple consecutive single-channel streams.  However, we
1033
  // now support multiple consecutive multi-channel streams of
1034
  // interleaved data as well.
1035
  UInt32 iStream, offsetCounter = firstChannel;
1036
  UInt32 nStreams = bufferList->mNumberBuffers;
1037
  bool monoMode = false;
1038
  bool foundStream = false;
1039
 
1040
  // First check that the device supports the requested number of
1041
  // channels.
1042
  UInt32 deviceChannels = 0;
1043
  for ( iStream=0; iStream<nStreams; iStream++ )
1044
    deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1045
 
1046
  if ( deviceChannels < ( channels + firstChannel ) ) {
1047
    free( bufferList );
1048
    errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1049
    errorText_ = errorStream_.str();
1050
    return FAILURE;
1051
  }
1052
 
1053
  // Look for a single stream meeting our needs.
1054
  UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1055
  for ( iStream=0; iStream<nStreams; iStream++ ) {
1056
    streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1057
    if ( streamChannels >= channels + offsetCounter ) {
1058
      firstStream = iStream;
1059
      channelOffset = offsetCounter;
1060
      foundStream = true;
1061
      break;
1062
    }
1063
    if ( streamChannels > offsetCounter ) break;
1064
    offsetCounter -= streamChannels;
1065
  }
1066
 
1067
  // If we didn't find a single stream above, then we should be able
1068
  // to meet the channel specification with multiple streams.
1069
  if ( foundStream == false ) {
1070
    monoMode = true;
1071
    offsetCounter = firstChannel;
1072
    for ( iStream=0; iStream<nStreams; iStream++ ) {
1073
      streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1074
      if ( streamChannels > offsetCounter ) break;
1075
      offsetCounter -= streamChannels;
1076
    }
1077
 
1078
    firstStream = iStream;
1079
    channelOffset = offsetCounter;
1080
    Int32 channelCounter = channels + offsetCounter - streamChannels;
1081
 
1082
    if ( streamChannels > 1 ) monoMode = false;
1083
    while ( channelCounter > 0 ) {
1084
      streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1085
      if ( streamChannels > 1 ) monoMode = false;
1086
      channelCounter -= streamChannels;
1087
      streamCount++;
1088
    }
1089
  }
1090
 
1091
  free( bufferList );
1092
 
1093
  // Determine the buffer size.
1094
  AudioValueRange bufferRange;
1095
  dataSize = sizeof( AudioValueRange );
1096
  property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1097
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1098
 
1099
  if ( result != noErr ) {
1100
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1101
    errorText_ = errorStream_.str();
1102
    return FAILURE;
1103
  }
1104
 
1105
  if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1106
  else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1107
  if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1108
 
1109
  // Set the buffer size.  For multiple streams, I'm assuming we only
1110
  // need to make this setting for the master channel.
1111
  UInt32 theSize = (UInt32) *bufferSize;
1112
  dataSize = sizeof( UInt32 );
1113
  property.mSelector = kAudioDevicePropertyBufferFrameSize;
1114
  result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1115
 
1116
  if ( result != noErr ) {
1117
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1118
    errorText_ = errorStream_.str();
1119
    return FAILURE;
1120
  }
1121
 
1122
  // If attempting to setup a duplex stream, the bufferSize parameter
1123
  // MUST be the same in both directions!
1124
  *bufferSize = theSize;
1125
  if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1126
    errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1127
    errorText_ = errorStream_.str();
1128
    return FAILURE;
1129
  }
1130
 
1131
  stream_.bufferSize = *bufferSize;
1132
  stream_.nBuffers = 1;
1133
 
1134
  // Try to set "hog" mode ... it's not clear to me this is working.
1135
  if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1136
    pid_t hog_pid;
1137
    dataSize = sizeof( hog_pid );
1138
    property.mSelector = kAudioDevicePropertyHogMode;
1139
    result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1140
    if ( result != noErr ) {
1141
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1142
      errorText_ = errorStream_.str();
1143
      return FAILURE;
1144
    }
1145
 
1146
    if ( hog_pid != getpid() ) {
1147
      hog_pid = getpid();
1148
      result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1149
      if ( result != noErr ) {
1150
        errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1151
        errorText_ = errorStream_.str();
1152
        return FAILURE;
1153
      }
1154
    }
1155
  }
1156
 
1157
  // Check and if necessary, change the sample rate for the device.
1158
  Float64 nominalRate;
1159
  dataSize = sizeof( Float64 );
1160
  property.mSelector = kAudioDevicePropertyNominalSampleRate;
1161
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1162
  if ( result != noErr ) {
1163
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1164
    errorText_ = errorStream_.str();
1165
    return FAILURE;
1166
  }
1167
 
1168
  // Only change the sample rate if off by more than 1 Hz.
1169
  if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1170
 
1171
    // Set a property listener for the sample rate change
1172
    Float64 reportedRate = 0.0;
1173
    AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1174
    result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1175
    if ( result != noErr ) {
1176
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1177
      errorText_ = errorStream_.str();
1178
      return FAILURE;
1179
    }
1180
 
1181
    nominalRate = (Float64) sampleRate;
1182
    result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1183
    if ( result != noErr ) {
1184
      AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1185
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1186
      errorText_ = errorStream_.str();
1187
      return FAILURE;
1188
    }
1189
 
1190
    // Now wait until the reported nominal rate is what we just set.
1191
    UInt32 microCounter = 0;
1192
    while ( reportedRate != nominalRate ) {
1193
      microCounter += 5000;
1194
      if ( microCounter > 5000000 ) break;
1195
      usleep( 5000 );
1196
    }
1197
 
1198
    // Remove the property listener.
1199
    AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1200
 
1201
    if ( microCounter > 5000000 ) {
1202
      errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1203
      errorText_ = errorStream_.str();
1204
      return FAILURE;
1205
    }
1206
  }
1207
 
1208
  // Now set the stream format for all streams.  Also, check the
1209
  // physical format of the device and change that if necessary.
1210
  AudioStreamBasicDescription description;
1211
  dataSize = sizeof( AudioStreamBasicDescription );
1212
  property.mSelector = kAudioStreamPropertyVirtualFormat;
1213
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1214
  if ( result != noErr ) {
1215
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1216
    errorText_ = errorStream_.str();
1217
    return FAILURE;
1218
  }
1219
 
1220
  // Set the sample rate and data format id.  However, only make the
1221
  // change if the sample rate is not within 1.0 of the desired
1222
  // rate and the format is not linear pcm.
1223
  bool updateFormat = false;
1224
  if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1225
    description.mSampleRate = (Float64) sampleRate;
1226
    updateFormat = true;
1227
  }
1228
 
1229
  if ( description.mFormatID != kAudioFormatLinearPCM ) {
1230
    description.mFormatID = kAudioFormatLinearPCM;
1231
    updateFormat = true;
1232
  }
1233
 
1234
  if ( updateFormat ) {
1235
    result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1236
    if ( result != noErr ) {
1237
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1238
      errorText_ = errorStream_.str();
1239
      return FAILURE;
1240
    }
1241
  }
1242
 
1243
  // Now check the physical format.
1244
  property.mSelector = kAudioStreamPropertyPhysicalFormat;
1245
  result = AudioObjectGetPropertyData( id, &property, 0, NULL,  &dataSize, &description );
1246
  if ( result != noErr ) {
1247
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1248
    errorText_ = errorStream_.str();
1249
    return FAILURE;
1250
  }
1251
 
1252
  //std::cout << "Current physical stream format:" << std::endl;
1253
  //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1254
  //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1255
  //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1256
  //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
1257
 
1258
  if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1259
    description.mFormatID = kAudioFormatLinearPCM;
1260
    //description.mSampleRate = (Float64) sampleRate;
1261
    AudioStreamBasicDescription testDescription = description;
1262
    UInt32 formatFlags;
1263
 
1264
    // We'll try higher bit rates first and then work our way down.
1265
    std::vector< std::pair<UInt32, UInt32>  > physicalFormats;
1266
    formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1267
    physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1268
    formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1269
    physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1270
    physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1271
    formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1272
    physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1273
    formatFlags |= kAudioFormatFlagIsAlignedHigh;
1274
    physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1275
    formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1276
    physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1277
    physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1278
 
1279
    bool setPhysicalFormat = false;
1280
    for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1281
      testDescription = description;
1282
      testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1283
      testDescription.mFormatFlags = physicalFormats[i].second;
1284
      if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1285
        testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
1286
      else
1287
        testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1288
      testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1289
      result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1290
      if ( result == noErr ) {
1291
        setPhysicalFormat = true;
1292
        //std::cout << "Updated physical stream format:" << std::endl;
1293
        //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1294
        //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1295
        //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1296
        //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1297
        break;
1298
      }
1299
    }
1300
 
1301
    if ( !setPhysicalFormat ) {
1302
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1303
      errorText_ = errorStream_.str();
1304
      return FAILURE;
1305
    }
1306
  } // done setting virtual/physical formats.
1307
 
1308
  // Get the stream / device latency.
1309
  UInt32 latency;
1310
  dataSize = sizeof( UInt32 );
1311
  property.mSelector = kAudioDevicePropertyLatency;
1312
  if ( AudioObjectHasProperty( id, &property ) == true ) {
1313
    result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1314
    if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1315
    else {
1316
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1317
      errorText_ = errorStream_.str();
1318
      error( RtAudioError::WARNING );
1319
    }
1320
  }
1321
 
1322
  // Byte-swapping: According to AudioHardware.h, the stream data will
1323
  // always be presented in native-endian format, so we should never
1324
  // need to byte swap.
1325
  stream_.doByteSwap[mode] = false;
1326
 
1327
  // From the CoreAudio documentation, PCM data must be supplied as
1328
  // 32-bit floats.
1329
  stream_.userFormat = format;
1330
  stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1331
 
1332
  if ( streamCount == 1 )
1333
    stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1334
  else // multiple streams
1335
    stream_.nDeviceChannels[mode] = channels;
1336
  stream_.nUserChannels[mode] = channels;
1337
  stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1338
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1339
  else stream_.userInterleaved = true;
1340
  stream_.deviceInterleaved[mode] = true;
1341
  if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1342
 
1343
  // Set flags for buffer conversion.
1344
  stream_.doConvertBuffer[mode] = false;
1345
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
1346
    stream_.doConvertBuffer[mode] = true;
1347
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1348
    stream_.doConvertBuffer[mode] = true;
1349
  if ( streamCount == 1 ) {
1350
    if ( stream_.nUserChannels[mode] > 1 &&
1351
         stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1352
      stream_.doConvertBuffer[mode] = true;
1353
  }
1354
  else if ( monoMode && stream_.userInterleaved )
1355
    stream_.doConvertBuffer[mode] = true;
1356
 
1357
  // Allocate our CoreHandle structure for the stream.
1358
  CoreHandle *handle = 0;
1359
  if ( stream_.apiHandle == 0 ) {
1360
    try {
1361
      handle = new CoreHandle;
1362
    }
1363
    catch ( std::bad_alloc& ) {
1364
      errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1365
      goto error;
1366
    }
1367
 
1368
    if ( pthread_cond_init( &handle->condition, NULL ) ) {
1369
      errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1370
      goto error;
1371
    }
1372
    stream_.apiHandle = (void *) handle;
1373
  }
1374
  else
1375
    handle = (CoreHandle *) stream_.apiHandle;
1376
  handle->iStream[mode] = firstStream;
1377
  handle->nStreams[mode] = streamCount;
1378
  handle->id[mode] = id;
1379
 
1380
  // Allocate necessary internal buffers.
1381
  unsigned long bufferBytes;
1382
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1383
  //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1384
  stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1385
  memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1386
  if ( stream_.userBuffer[mode] == NULL ) {
1387
    errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1388
    goto error;
1389
  }
1390
 
1391
  // If possible, we will make use of the CoreAudio stream buffers as
1392
  // "device buffers".  However, we can't do this if using multiple
1393
  // streams.
1394
  if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1395
 
1396
    bool makeBuffer = true;
1397
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1398
    if ( mode == INPUT ) {
1399
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1400
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1401
        if ( bufferBytes <= bytesOut ) makeBuffer = false;
1402
      }
1403
    }
1404
 
1405
    if ( makeBuffer ) {
1406
      bufferBytes *= *bufferSize;
1407
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1408
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1409
      if ( stream_.deviceBuffer == NULL ) {
1410
        errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1411
        goto error;
1412
      }
1413
    }
1414
  }
1415
 
1416
  stream_.sampleRate = sampleRate;
1417
  stream_.device[mode] = device;
1418
  stream_.state = STREAM_STOPPED;
1419
  stream_.callbackInfo.object = (void *) this;
1420
 
1421
  // Setup the buffer conversion information structure.
1422
  if ( stream_.doConvertBuffer[mode] ) {
1423
    if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1424
    else setConvertInfo( mode, channelOffset );
1425
  }
1426
 
1427
  if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1428
    // Only one callback procedure per device.
1429
    stream_.mode = DUPLEX;
1430
  else {
1431
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1432
    result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1433
#else
1434
    // deprecated in favor of AudioDeviceCreateIOProcID()
1435
    result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1436
#endif
1437
    if ( result != noErr ) {
1438
      errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1439
      errorText_ = errorStream_.str();
1440
      goto error;
1441
    }
1442
    if ( stream_.mode == OUTPUT && mode == INPUT )
1443
      stream_.mode = DUPLEX;
1444
    else
1445
      stream_.mode = mode;
1446
  }
1447
 
1448
  // Setup the device property listener for over/underload.
1449
  property.mSelector = kAudioDeviceProcessorOverload;
1450
  property.mScope = kAudioObjectPropertyScopeGlobal;
1451
  result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1452
 
1453
  return SUCCESS;
1454
 
1455
 error:
1456
  if ( handle ) {
1457
    pthread_cond_destroy( &handle->condition );
1458
    delete handle;
1459
    stream_.apiHandle = 0;
1460
  }
1461
 
1462
  for ( int i=0; i<2; i++ ) {
1463
    if ( stream_.userBuffer[i] ) {
1464
      free( stream_.userBuffer[i] );
1465
      stream_.userBuffer[i] = 0;
1466
    }
1467
  }
1468
 
1469
  if ( stream_.deviceBuffer ) {
1470
    free( stream_.deviceBuffer );
1471
    stream_.deviceBuffer = 0;
1472
  }
1473
 
1474
  stream_.state = STREAM_CLOSED;
1475
  return FAILURE;
1476
}
void RtApiCore :: closeStream( void )
1479
{
1480
  if ( stream_.state == STREAM_CLOSED ) {
1481
    errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1482
    error( RtAudioError::WARNING );
1483
    return;
1484
  }
1485
 
1486
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1487
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1488
    if (handle) {
1489
      AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1490
        kAudioObjectPropertyScopeGlobal,
1491
        kAudioObjectPropertyElementMaster };
1492
 
1493
      property.mSelector = kAudioDeviceProcessorOverload;
1494
      property.mScope = kAudioObjectPropertyScopeGlobal;
1495
      if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1496
        errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1497
        error( RtAudioError::WARNING );
1498
      }
1499
 
1500
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1501
      if ( stream_.state == STREAM_RUNNING )
1502
        AudioDeviceStop( handle->id[0], handle->procId[0] );
1503
      AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1504
#else // deprecated behaviour
1505
      if ( stream_.state == STREAM_RUNNING )
1506
        AudioDeviceStop( handle->id[0], callbackHandler );
1507
      AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
1508
#endif
1509
    }
1510
  }
1511
 
1512
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1513
    if (handle) {
1514
      AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1515
        kAudioObjectPropertyScopeGlobal,
1516
        kAudioObjectPropertyElementMaster };
1517
 
1518
      property.mSelector = kAudioDeviceProcessorOverload;
1519
      property.mScope = kAudioObjectPropertyScopeGlobal;
1520
      if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1521
        errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1522
        error( RtAudioError::WARNING );
1523
      }
1524
 
1525
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1526
      if ( stream_.state == STREAM_RUNNING )
1527
        AudioDeviceStop( handle->id[1], handle->procId[1] );
1528
      AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1529
#else // deprecated behaviour
1530
      if ( stream_.state == STREAM_RUNNING )
1531
        AudioDeviceStop( handle->id[1], callbackHandler );
1532
      AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1533
#endif
1534
    }
1535
  }
1536
 
1537
  for ( int i=0; i<2; i++ ) {
1538
    if ( stream_.userBuffer[i] ) {
1539
      free( stream_.userBuffer[i] );
1540
      stream_.userBuffer[i] = 0;
1541
    }
1542
  }
1543
 
1544
  if ( stream_.deviceBuffer ) {
1545
    free( stream_.deviceBuffer );
1546
    stream_.deviceBuffer = 0;
1547
  }
1548
 
1549
  // Destroy pthread condition variable.
1550
  pthread_cond_destroy( &handle->condition );
1551
  delete handle;
1552
  stream_.apiHandle = 0;
1553
 
1554
  stream_.mode = UNINITIALIZED;
1555
  stream_.state = STREAM_CLOSED;
1556
}
void RtApiCore :: startStream( void )
{
  // Start the CoreAudio device callback(s) for an open stream.  Emits a
  // WARNING (and returns) if the stream is already running; emits a
  // SYSTEM_ERROR if AudioDeviceStart() fails on either device.
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiCore::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

#if defined( HAVE_GETTIMEOFDAY )
  // Reset the wall-clock reference used to interpolate stream time.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    // Modern API: start using the AudioDeviceIOProcID registered at open.
    result = AudioDeviceStart( handle->id[0], handle->procId[0] );
#else // deprecated behaviour
    result = AudioDeviceStart( handle->id[0], callbackHandler );
#endif
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // Start the input side too, but only when it is a distinct device — a
  // duplex stream on one shared device needs just the single start above.
  if ( stream_.mode == INPUT ||
       ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    result = AudioDeviceStart( handle->id[1], handle->procId[1] );
#else // deprecated behaviour
    result = AudioDeviceStart( handle->id[1], callbackHandler );
#endif
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // Reset the drain bookkeeping consumed by callbackEvent()/stopStream().
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

 unlock:
  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
1610
 
1611
void RtApiCore :: stopStream( void )
{
  // Stop the CoreAudio callback(s) for a running stream.  For output
  // modes the call first lets the last buffers drain: it requests a
  // drain and blocks on the handle's condition variable until
  // callbackEvent() signals completion.
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    if ( handle->drainCounter == 0 ) {
      // drainCounter == 0 means a normal external stop: request the drain
      // and wait for the callback's signal.  abortStream() pre-sets the
      // counter to 2 so this wait is skipped and the stop is immediate.
      handle->drainCounter = 2;
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }

#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    result = AudioDeviceStop( handle->id[0], handle->procId[0] );
#else // deprecated behaviour
    result = AudioDeviceStop( handle->id[0], callbackHandler );
#endif
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  // Stop the input device when it is separate from the output device.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    result = AudioDeviceStop( handle->id[1], handle->procId[1] );
#else  // deprecated behaviour
    result = AudioDeviceStop( handle->id[1], callbackHandler );
#endif
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  stream_.state = STREAM_STOPPED;

 unlock:
  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
1661
 
1662
void RtApiCore :: abortStream( void )
1663
{
1664
  verifyStream();
1665
  if ( stream_.state == STREAM_STOPPED ) {
1666
    errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1667
    error( RtAudioError::WARNING );
1668
    return;
1669
  }
1670
 
1671
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1672
  handle->drainCounter = 2;
1673
 
1674
  stopStream();
1675
}
1676
 
1677
// This function will be called by a spawned thread when the user
1678
// callback function signals that the stream should be stopped or
1679
// aborted.  It is better to handle it this way because the
1680
// callbackEvent() function probably should return before the AudioDeviceStop()
1681
// function is called.
1682
static void *coreStopStream( void *ptr )
1683
{
1684
  CallbackInfo *info = (CallbackInfo *) ptr;
1685
  RtApiCore *object = (RtApiCore *) info->object;
1686
 
1687
  object->stopStream();
1688
  pthread_exit( NULL );
1689
}
1690
 
1691
bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
                                 const AudioBufferList *inBufferList,
                                 const AudioBufferList *outBufferList )
{
  // Per-buffer CoreAudio I/O handler.  Invokes the user callback for
  // fresh data, then moves samples between the user buffers and the
  // CoreAudio stream buffers, performing format conversion,
  // (de)interleaving and channel-offset handling as configured for the
  // stream.  In duplex mode with two distinct devices this function is
  // entered once per device; deviceId selects which half to service.
  // Returns SUCCESS to keep streaming, FAILURE on an unexpected state.
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal is finished.
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      // Drain was initiated by the user callback's return value: stop the
      // stream from a separate thread, since AudioDeviceStop() should not
      // be called from inside this callback.
      pthread_create( &threadId, NULL, coreStopStream, info );
    else // external call to stopStream()
      pthread_cond_signal( &handle->condition );
    return SUCCESS;
  }

  AudioDeviceID outputDevice = handle->id[0];

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream or duplex mode AND the input/output devices are
  // different AND this function is called for the input device.
  if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;
    }
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;
    }

    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    if ( cbReturnValue == 2 ) {
      // Callback requested an immediate abort (no drain).
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      abortStream();
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      // Callback requested a stop after the output drains.
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      if ( handle->nStreams[0] == 1 ) {
        memset( outBufferList->mBuffers[handle->iStream[0]].mData,
                0,
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
      else { // fill multiple streams with zeros
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  0,
                  outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
        }
      }
    }
    else if ( handle->nStreams[0] == 1 ) {
      if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
        convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
                       stream_.userBuffer[0], stream_.convertInfo[0] );
      }
      else { // copy from user buffer
        memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
                stream_.userBuffer[0],
                outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
      }
    }
    else { // fill multiple streams
      Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
      if ( stream_.doConvertBuffer[0] ) {
        convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
        inBuffer = (Float32 *) stream_.deviceBuffer;
      }

      if ( stream_.deviceInterleaved[0] == false ) { // mono mode
        UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
          memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
                  (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
        }
      }
      else { // fill multiple multi-channel streams with interleaved data
        UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
        Float32 *out, *in;

        bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 inChannels = stream_.nUserChannels[0];
        if ( stream_.doConvertBuffer[0] ) {
          inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          inChannels = stream_.nDeviceChannels[0];
        }

        if ( inInterleaved ) inOffset = 1;
        else inOffset = stream_.bufferSize;

        channelsLeft = inChannels;
        for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
          in = inBuffer;
          out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
          streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;

          outJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[0] > 0 ) {
            streamChannels -= stream_.channelOffset[0];
            outJump = stream_.channelOffset[0];
            out += outJump;
          }

          // Account for possible unfilled channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            outJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine input buffer offsets and skips
          if ( inInterleaved ) {
            inJump = inChannels;
            in += inChannels - channelsLeft;
          }
          else {
            inJump = 1;
            in += (inChannels - channelsLeft) * inOffset;
          }

          // Frame copy loop (inner index shadows the outer stream index;
          // its scope ends with this loop).
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              *out++ = in[j*inOffset];
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }
    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  AudioDeviceID inputDevice;
  inputDevice = handle->id[1];
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {

    if ( handle->nStreams[1] == 1 ) {
      if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
        convertBuffer( stream_.userBuffer[1],
                       (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
                       stream_.convertInfo[1] );
      }
      else { // copy to user buffer
        memcpy( stream_.userBuffer[1],
                inBufferList->mBuffers[handle->iStream[1]].mData,
                inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
      }
    }
    else { // read from multiple streams
      Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
      if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;

      if ( stream_.deviceInterleaved[1] == false ) { // mono mode
        UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
        for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
          memcpy( (void *)&outBuffer[i*stream_.bufferSize],
                  inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
        }
      }
      else { // read from multiple multi-channel streams
        UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
        Float32 *out, *in;

        bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
        UInt32 outChannels = stream_.nUserChannels[1];
        if ( stream_.doConvertBuffer[1] ) {
          outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
          outChannels = stream_.nDeviceChannels[1];
        }

        if ( outInterleaved ) outOffset = 1;
        else outOffset = stream_.bufferSize;

        channelsLeft = outChannels;
        for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
          out = outBuffer;
          in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
          streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;

          inJump = 0;
          // Account for possible channel offset in first stream
          if ( i == 0 && stream_.channelOffset[1] > 0 ) {
            streamChannels -= stream_.channelOffset[1];
            inJump = stream_.channelOffset[1];
            in += inJump;
          }

          // Account for possible unread channels at end of the last stream
          if ( streamChannels > channelsLeft ) {
            inJump = streamChannels - channelsLeft;
            streamChannels = channelsLeft;
          }

          // Determine output buffer offsets and skips
          if ( outInterleaved ) {
            outJump = outChannels;
            out += outChannels - channelsLeft;
          }
          else {
            outJump = 1;
            out += (outChannels - channelsLeft) * outOffset;
          }

          // Frame copy loop (inner index shadows the outer stream index;
          // its scope ends with this loop).
          for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
            for ( unsigned int j=0; j<streamChannels; j++ ) {
              out[j*outOffset] = *in++;
            }
            out += outJump;
            in += inJump;
          }
          channelsLeft -= streamChannels;
        }
      }

      if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
        convertBuffer( stream_.userBuffer[1],
                       stream_.deviceBuffer,
                       stream_.convertInfo[1] );
      }
    }
  }

 unlock:
  //MUTEX_UNLOCK( &stream_.mutex );

  // Make sure to only tick duplex stream time once if using two devices
  // NOTE(review): for a DUPLEX stream on a single shared device
  // (handle->id[0] == handle->id[1]) neither side of this condition can
  // hold, so tickStreamTime() appears never to be called in that case —
  // verify against upstream RtAudio.
  if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
    RtApi::tickStreamTime();

  return SUCCESS;
}
1953
 
1954
const char* RtApiCore :: getErrorCode( OSStatus code )
{
  // Translate a CoreAudio OSStatus value into the name of the matching
  // error constant, for use in diagnostic messages.  Unrecognized codes
  // map to a generic string.
  struct CodeName { OSStatus status; const char *name; };
  static const CodeName names[] = {
    { kAudioHardwareNotRunningError,          "kAudioHardwareNotRunningError" },
    { kAudioHardwareUnspecifiedError,         "kAudioHardwareUnspecifiedError" },
    { kAudioHardwareUnknownPropertyError,     "kAudioHardwareUnknownPropertyError" },
    { kAudioHardwareBadPropertySizeError,     "kAudioHardwareBadPropertySizeError" },
    { kAudioHardwareIllegalOperationError,    "kAudioHardwareIllegalOperationError" },
    { kAudioHardwareBadObjectError,           "kAudioHardwareBadObjectError" },
    { kAudioHardwareBadDeviceError,           "kAudioHardwareBadDeviceError" },
    { kAudioHardwareBadStreamError,           "kAudioHardwareBadStreamError" },
    { kAudioHardwareUnsupportedOperationError, "kAudioHardwareUnsupportedOperationError" },
    { kAudioDeviceUnsupportedFormatError,     "kAudioDeviceUnsupportedFormatError" },
    { kAudioDevicePermissionsError,           "kAudioDevicePermissionsError" }
  };

  for ( unsigned int i=0; i<sizeof(names)/sizeof(names[0]); i++ )
    if ( names[i].status == code ) return names[i].name;

  return "CoreAudio unknown error";
}
1995
 
1996
  //******************** End of __MACOSX_CORE__ *********************//
1997
#endif
1998
 
1999
#if defined(__UNIX_JACK__)
2000
 
2001
// JACK is a low-latency audio server, originally written for the
2002
// GNU/Linux operating system and now also ported to OS-X. It can
2003
// connect a number of different applications to an audio device, as
2004
// well as allowing them to share audio between themselves.
2005
//
2006
// When using JACK with RtAudio, "devices" refer to JACK clients that
2007
// have ports connected to the server.  The JACK server is typically
2008
// started in a terminal as follows:
2009
//
2010
// .jackd -d alsa -d hw:0
2011
//
2012
// or through an interface program such as qjackctl.  Many of the
2013
// parameters normally set for a stream are fixed by the JACK server
2014
// and can be specified when the JACK server is started.  In
2015
// particular,
2016
//
2017
// .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2018
//
2019
// specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2020
// frames, and number of buffers = 4.  Once the server is running, it
2021
// is not possible to override these values.  If the values are not
2022
// specified in the command-line, the JACK server uses default values.
2023
//
2024
// The JACK server does not have to be running when an instance of
2025
// RtApiJack is created, though the function getDeviceCount() will
2026
// report 0 devices found until JACK has been started.  When no
2027
// devices are available (i.e., the JACK server is not running), a
2028
// stream cannot be opened.
2029
 
2030
#include <jack/jack.h>
2031
#include <unistd.h>
2032
#include <cstdio>
2033
 
2034
// A structure to hold various information related to the Jack API
// implementation.  One instance is allocated per open stream (see
// probeDeviceOpen) and stored in stream_.apiHandle.
struct JackHandle {
  jack_client_t *client;        // connection to the Jack server, created in probeDeviceOpen()
  jack_port_t **ports[2];       // per-direction arrays of Jack port handles, indexed by stream mode
  std::string deviceName[2];    // per-direction Jack client-name prefix chosen at open time
  bool xrun[2];                 // over/underflow flags, set by the jackXrun() callback
  pthread_cond_t condition;     // initialized in probeDeviceOpen(); coordinates stream draining
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.

  JackHandle()
    :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
};
2048
 
2049
#if !defined(__RTAUDIO_DEBUG__)
// No-op Jack error handler installed by the RtApiJack constructor to
// silence Jack's internal error reporting in non-debug builds.
// (Fixed: removed the stray semicolon after the function body, which
// triggers -Wextra-semi / pedantic warnings.)
static void jackSilentError( const char * ) {}
#endif
2052
 
2053
// Construct the Jack API object.  Autoconnection of ports defaults to
// enabled; in non-debug builds Jack's own error reporting is silenced.
RtApiJack :: RtApiJack()
    :shouldAutoconnect_(true) {
  // Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
  // Turn off Jack's internal error reporting.
  jack_set_error_function( &jackSilentError );
#endif
}
2061
 
2062
RtApiJack :: ~RtApiJack()
{
  // Release stream resources if the user neglected to call closeStream().
  if ( stream_.state == STREAM_CLOSED ) return;
  closeStream();
}
2066
 
2067
unsigned int RtApiJack :: getDeviceCount( void )
2068
{
2069
  // See if we can become a jack client.
2070
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2071
  jack_status_t *status = NULL;
2072
  jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2073
  if ( client == 0 ) return 0;
2074
 
2075
  const char **ports;
2076
  std::string port, previousPort;
2077
  unsigned int nChannels = 0, nDevices = 0;
2078
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2079
  if ( ports ) {
2080
    // Parse the port names up to the first colon (:).
2081
    size_t iColon = 0;
2082
    do {
2083
      port = (char *) ports[ nChannels ];
2084
      iColon = port.find(":");
2085
      if ( iColon != std::string::npos ) {
2086
        port = port.substr( 0, iColon + 1 );
2087
        if ( port != previousPort ) {
2088
          nDevices++;
2089
          previousPort = port;
2090
        }
2091
      }
2092
    } while ( ports[++nChannels] );
2093
    free( ports );
2094
  }
2095
 
2096
  jack_client_close( client );
2097
  return nDevices;
2098
}
2099
 
2100
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
{
  // Probe device `device` (an index over distinct Jack client-name
  // prefixes) and fill a DeviceInfo: name, channel counts, sample rate,
  // and native format.  On any failure info.probed stays false and a
  // WARNING or INVALID_USE error is raised.
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Open a temporary client just for probing; never auto-start a server.
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );
    return info;
  }

  const char **ports;
  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
  if ( ports ) {
    // Parse the port names up to the first colon (:).
    size_t iColon = 0;
    do {
      port = (char *) ports[ nPorts ];
      iColon = port.find(":");
      if ( iColon != std::string::npos ) {
        port = port.substr( 0, iColon );
        if ( port != previousPort ) {
          // The device-th distinct prefix is the device we were asked about.
          if ( nDevices == device ) info.name = port;
          nDevices++;
          previousPort = port;
        }
      }
    } while ( ports[++nPorts] );
    free( ports );
  }

  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Get the current jack server sample rate.
  info.sampleRates.clear();

  // The server rate is the only rate a Jack client can use.
  info.preferredSampleRate = jack_get_sample_rate( client );
  info.sampleRates.push_back( info.preferredSampleRate );

  // Count the available ports containing the client name as device
  // channels.  Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.outputChannels = nChannels;
  }

  // Jack "output ports" equal RtAudio input channels.
  nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
  if ( ports ) {
    while ( ports[ nChannels ] ) nChannels++;
    free( ports );
    info.inputChannels = nChannels;
  }

  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );
    return info;
  }

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

  jack_client_close(client);
  info.probed = true;
  return info;
}
2192
 
2193
// Trampoline from the Jack process callback into the owning RtApiJack
// object.  A non-zero return value tells Jack that processing failed.
static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
{
  CallbackInfo *callbackInfo = static_cast<CallbackInfo *>( infoPointer );
  RtApiJack *api = static_cast<RtApiJack *>( callbackInfo->object );

  return ( api->callbackEvent( (unsigned long) nframes ) == false ) ? 1 : 0;
}
2202
 
2203
// This function will be called by a spawned thread when the Jack
2204
// server signals that it is shutting down.  It is necessary to handle
2205
// it this way because the jackShutdown() function must return before
2206
// the jack_deactivate() function (in closeStream()) will return.
2207
static void *jackCloseStream( void *ptr )
2208
{
2209
  CallbackInfo *info = (CallbackInfo *) ptr;
2210
  RtApiJack *object = (RtApiJack *) info->object;
2211
 
2212
  object->closeStream();
2213
 
2214
  pthread_exit( NULL );
2215
}
2216
// Jack shutdown callback: closes the stream (on a separate thread) when
// the Jack server goes away while a stream is still running.
static void jackShutdown( void *infoPointer )
{
  CallbackInfo *info = (CallbackInfo *) infoPointer;
  RtApiJack *object = (RtApiJack *) info->object;

  // Check current stream state.  If stopped, then we'll assume this
  // was called as a result of a call to RtApiJack::stopStream (the
  // deactivation of a client handle causes this function to be called).
  // If not, we'll assume the Jack server is shutting down or some
  // other problem occurred and we should close the stream.
  if ( object->isStreamRunning() == false ) return;

  // Close from a spawned thread: this callback must return before
  // jack_deactivate() (inside closeStream) can complete.
  ThreadHandle threadId;
  pthread_create( &threadId, NULL, jackCloseStream, info );
  std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
}
2232
 
2233
static int jackXrun( void *infoPointer )
2234
{
2235
  JackHandle *handle = *((JackHandle **) infoPointer);
2236
 
2237
  if ( handle->ports[0] ) handle->xrun[0] = true;
2238
  if ( handle->ports[1] ) handle->xrun[1] = true;
2239
 
2240
  return 0;
2241
}
2242
 
2243
bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2244
                                   unsigned int firstChannel, unsigned int sampleRate,
2245
                                   RtAudioFormat format, unsigned int *bufferSize,
2246
                                   RtAudio::StreamOptions *options )
2247
{
2248
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
2249
 
2250
  // Look for jack server and try to become a client (only do once per stream).
2251
  jack_client_t *client = 0;
2252
  if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2253
    jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2254
    jack_status_t *status = NULL;
2255
    if ( options && !options->streamName.empty() )
2256
      client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2257
    else
2258
      client = jack_client_open( "RtApiJack", jackoptions, status );
2259
    if ( client == 0 ) {
2260
      errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2261
      error( RtAudioError::WARNING );
2262
      return FAILURE;
2263
    }
2264
  }
2265
  else {
2266
    // The handle must have been created on an earlier pass.
2267
    client = handle->client;
2268
  }
2269
 
2270
  const char **ports;
2271
  std::string port, previousPort, deviceName;
2272
  unsigned int nPorts = 0, nDevices = 0;
2273
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2274
  if ( ports ) {
2275
    // Parse the port names up to the first colon (:).
2276
    size_t iColon = 0;
2277
    do {
2278
      port = (char *) ports[ nPorts ];
2279
      iColon = port.find(":");
2280
      if ( iColon != std::string::npos ) {
2281
        port = port.substr( 0, iColon );
2282
        if ( port != previousPort ) {
2283
          if ( nDevices == device ) deviceName = port;
2284
          nDevices++;
2285
          previousPort = port;
2286
        }
2287
      }
2288
    } while ( ports[++nPorts] );
2289
    free( ports );
2290
  }
2291
 
2292
  if ( device >= nDevices ) {
2293
    errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2294
    return FAILURE;
2295
  }
2296
 
2297
  unsigned long flag = JackPortIsInput;
2298
  if ( mode == INPUT ) flag = JackPortIsOutput;
2299
 
2300
  if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2301
    // Count the available ports containing the client name as device
2302
    // channels.  Jack "input ports" equal RtAudio output channels.
2303
    unsigned int nChannels = 0;
2304
    ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2305
    if ( ports ) {
2306
      while ( ports[ nChannels ] ) nChannels++;
2307
      free( ports );
2308
    }
2309
    // Compare the jack ports for specified client to the requested number of channels.
2310
    if ( nChannels < (channels + firstChannel) ) {
2311
      errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2312
      errorText_ = errorStream_.str();
2313
      return FAILURE;
2314
    }
2315
  }
2316
 
2317
  // Check the jack server sample rate.
2318
  unsigned int jackRate = jack_get_sample_rate( client );
2319
  if ( sampleRate != jackRate ) {
2320
    jack_client_close( client );
2321
    errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2322
    errorText_ = errorStream_.str();
2323
    return FAILURE;
2324
  }
2325
  stream_.sampleRate = jackRate;
2326
 
2327
  // Get the latency of the JACK port.
2328
  ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2329
  if ( ports[ firstChannel ] ) {
2330
    // Added by Ge Wang
2331
    jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2332
    // the range (usually the min and max are equal)
2333
    jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2334
    // get the latency range
2335
    jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2336
    // be optimistic, use the min!
2337
    stream_.latency[mode] = latrange.min;
2338
    //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2339
  }
2340
  free( ports );
2341
 
2342
  // The jack server always uses 32-bit floating-point data.
2343
  stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2344
  stream_.userFormat = format;
2345
 
2346
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2347
  else stream_.userInterleaved = true;
2348
 
2349
  // Jack always uses non-interleaved buffers.
2350
  stream_.deviceInterleaved[mode] = false;
2351
 
2352
  // Jack always provides host byte-ordered data.
2353
  stream_.doByteSwap[mode] = false;
2354
 
2355
  // Get the buffer size.  The buffer size and number of buffers
2356
  // (periods) is set when the jack server is started.
2357
  stream_.bufferSize = (int) jack_get_buffer_size( client );
2358
  *bufferSize = stream_.bufferSize;
2359
 
2360
  stream_.nDeviceChannels[mode] = channels;
2361
  stream_.nUserChannels[mode] = channels;
2362
 
2363
  // Set flags for buffer conversion.
2364
  stream_.doConvertBuffer[mode] = false;
2365
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
2366
    stream_.doConvertBuffer[mode] = true;
2367
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2368
       stream_.nUserChannels[mode] > 1 )
2369
    stream_.doConvertBuffer[mode] = true;
2370
 
2371
  // Allocate our JackHandle structure for the stream.
2372
  if ( handle == 0 ) {
2373
    try {
2374
      handle = new JackHandle;
2375
    }
2376
    catch ( std::bad_alloc& ) {
2377
      errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2378
      goto error;
2379
    }
2380
 
2381
    if ( pthread_cond_init(&handle->condition, NULL) ) {
2382
      errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2383
      goto error;
2384
    }
2385
    stream_.apiHandle = (void *) handle;
2386
    handle->client = client;
2387
  }
2388
  handle->deviceName[mode] = deviceName;
2389
 
2390
  // Allocate necessary internal buffers.
2391
  unsigned long bufferBytes;
2392
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2393
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2394
  if ( stream_.userBuffer[mode] == NULL ) {
2395
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2396
    goto error;
2397
  }
2398
 
2399
  if ( stream_.doConvertBuffer[mode] ) {
2400
 
2401
    bool makeBuffer = true;
2402
    if ( mode == OUTPUT )
2403
      bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2404
    else { // mode == INPUT
2405
      bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2406
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2407
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2408
        if ( bufferBytes < bytesOut ) makeBuffer = false;
2409
      }
2410
    }
2411
 
2412
    if ( makeBuffer ) {
2413
      bufferBytes *= *bufferSize;
2414
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2415
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2416
      if ( stream_.deviceBuffer == NULL ) {
2417
        errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2418
        goto error;
2419
      }
2420
    }
2421
  }
2422
 
2423
  // Allocate memory for the Jack ports (channels) identifiers.
2424
  handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2425
  if ( handle->ports[mode] == NULL )  {
2426
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2427
    goto error;
2428
  }
2429
 
2430
  stream_.device[mode] = device;
2431
  stream_.channelOffset[mode] = firstChannel;
2432
  stream_.state = STREAM_STOPPED;
2433
  stream_.callbackInfo.object = (void *) this;
2434
 
2435
  if ( stream_.mode == OUTPUT && mode == INPUT )
2436
    // We had already set up the stream for output.
2437
    stream_.mode = DUPLEX;
2438
  else {
2439
    stream_.mode = mode;
2440
    jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2441
    jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2442
    jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2443
  }
2444
 
2445
  // Register our ports.
2446
  char label[64];
2447
  if ( mode == OUTPUT ) {
2448
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2449
      snprintf( label, 64, "outport %d", i );
2450
      handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2451
                                                JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2452
    }
2453
  }
2454
  else {
2455
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2456
      snprintf( label, 64, "inport %d", i );
2457
      handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2458
                                                JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2459
    }
2460
  }
2461
 
2462
  // Setup the buffer conversion information structure.  We don't use
2463
  // buffers to do channel offsets, so we override that parameter
2464
  // here.
2465
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2466
 
2467
  if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
2468
 
2469
  return SUCCESS;
2470
 
2471
 error:
2472
  if ( handle ) {
2473
    pthread_cond_destroy( &handle->condition );
2474
    jack_client_close( handle->client );
2475
 
2476
    if ( handle->ports[0] ) free( handle->ports[0] );
2477
    if ( handle->ports[1] ) free( handle->ports[1] );
2478
 
2479
    delete handle;
2480
    stream_.apiHandle = 0;
2481
  }
2482
 
2483
  for ( int i=0; i<2; i++ ) {
2484
    if ( stream_.userBuffer[i] ) {
2485
      free( stream_.userBuffer[i] );
2486
      stream_.userBuffer[i] = 0;
2487
    }
2488
  }
2489
 
2490
  if ( stream_.deviceBuffer ) {
2491
    free( stream_.deviceBuffer );
2492
    stream_.deviceBuffer = 0;
2493
  }
2494
 
2495
  return FAILURE;
2496
}
2497
 
2498
void RtApiJack :: closeStream( void )
2499
{
2500
  if ( stream_.state == STREAM_CLOSED ) {
2501
    errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2502
    error( RtAudioError::WARNING );
2503
    return;
2504
  }
2505
 
2506
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
2507
  if ( handle ) {
2508
 
2509
    if ( stream_.state == STREAM_RUNNING )
2510
      jack_deactivate( handle->client );
2511
 
2512
    jack_client_close( handle->client );
2513
  }
2514
 
2515
  if ( handle ) {
2516
    if ( handle->ports[0] ) free( handle->ports[0] );
2517
    if ( handle->ports[1] ) free( handle->ports[1] );
2518
    pthread_cond_destroy( &handle->condition );
2519
    delete handle;
2520
    stream_.apiHandle = 0;
2521
  }
2522
 
2523
  for ( int i=0; i<2; i++ ) {
2524
    if ( stream_.userBuffer[i] ) {
2525
      free( stream_.userBuffer[i] );
2526
      stream_.userBuffer[i] = 0;
2527
    }
2528
  }
2529
 
2530
  if ( stream_.deviceBuffer ) {
2531
    free( stream_.deviceBuffer );
2532
    stream_.deviceBuffer = 0;
2533
  }
2534
 
2535
  stream_.mode = UNINITIALIZED;
2536
  stream_.state = STREAM_CLOSED;
2537
}
2538
 
2539
// Activate the JACK client and, unless autoconnect was disabled at open
// time (RTAUDIO_JACK_DONT_CONNECT), connect our registered ports to the
// device's ports.  On any failure, falls through to `unlock` with a
// non-zero `result` and raises a SYSTEM_ERROR.
void RtApiJack :: startStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  // Reset the stream-time reference used by getStreamTime().
  #if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
  #endif

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );
  if ( result ) {
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
    goto unlock;
  }

  const char **ports;

  // Get the list of available ports.
  if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
    // `result` acts as a failure sentinel: assume failure until a
    // jack_get_ports()/jack_connect() call succeeds.
    result = 1;
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
      goto unlock;
    }

    // Now make the port connections.  Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
    result = 1;
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
      goto unlock;
    }

    // Now make the port connections.  See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      result = 1;
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
      if ( result ) {
        free( ports );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";
        goto unlock;
      }
    }
    free(ports);
  }

  // Reset the drain state so callbackEvent() runs the user callback.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

 unlock:
  if ( result == 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
2616
 
2617
// Stop the stream.  For output/duplex streams, first waits on the handle's
// condition variable until callbackEvent() has finished draining (it writes
// zeros and signals once drainCounter exceeds 3), then deactivates the
// JACK client.
void RtApiJack :: stopStream( void )
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // drainCounter == 0 means draining has not been requested yet; setting
    // it to 2 tells callbackEvent() to zero the output and signal us.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      // NOTE(review): POSIX requires stream_.mutex to be locked by the
      // caller before pthread_cond_wait(); no lock appears to be taken
      // here — confirm against the RtApi locking conventions.
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
    }
  }

  jack_deactivate( handle->client );
  stream_.state = STREAM_STOPPED;
}
2638
 
2639
void RtApiJack :: abortStream( void )
2640
{
2641
  verifyStream();
2642
  if ( stream_.state == STREAM_STOPPED ) {
2643
    errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2644
    error( RtAudioError::WARNING );
2645
    return;
2646
  }
2647
 
2648
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
2649
  handle->drainCounter = 2;
2650
 
2651
  stopStream();
2652
}
2653
 
2654
// This function will be called by a spawned thread when the user
2655
// callback function signals that the stream should be stopped or
2656
// aborted.  It is necessary to handle it this way because the
2657
// callbackEvent() function must return before the jack_deactivate()
2658
// function will return.
2659
static void *jackStopStream( void *ptr )
2660
{
2661
  CallbackInfo *info = (CallbackInfo *) ptr;
2662
  RtApiJack *object = (RtApiJack *) info->object;
2663
 
2664
  object->stopStream();
2665
  pthread_exit( NULL );
2666
}
2667
 
2668
bool RtApiJack :: callbackEvent( unsigned long nframes )
2669
{
2670
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2671
  if ( stream_.state == STREAM_CLOSED ) {
2672
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2673
    error( RtAudioError::WARNING );
2674
    return FAILURE;
2675
  }
2676
  if ( stream_.bufferSize != nframes ) {
2677
    errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2678
    error( RtAudioError::WARNING );
2679
    return FAILURE;
2680
  }
2681
 
2682
  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2683
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
2684
 
2685
  // Check if we were draining the stream and signal is finished.
2686
  if ( handle->drainCounter > 3 ) {
2687
    ThreadHandle threadId;
2688
 
2689
    stream_.state = STREAM_STOPPING;
2690
    if ( handle->internalDrain == true )
2691
      pthread_create( &threadId, NULL, jackStopStream, info );
2692
    else
2693
      pthread_cond_signal( &handle->condition );
2694
    return SUCCESS;
2695
  }
2696
 
2697
  // Invoke user callback first, to get fresh output data.
2698
  if ( handle->drainCounter == 0 ) {
2699
    RtAudioCallback callback = (RtAudioCallback) info->callback;
2700
    double streamTime = getStreamTime();
2701
    RtAudioStreamStatus status = 0;
2702
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2703
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
2704
      handle->xrun[0] = false;
2705
    }
2706
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2707
      status |= RTAUDIO_INPUT_OVERFLOW;
2708
      handle->xrun[1] = false;
2709
    }
2710
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2711
                                  stream_.bufferSize, streamTime, status, info->userData );
2712
    if ( cbReturnValue == 2 ) {
2713
      stream_.state = STREAM_STOPPING;
2714
      handle->drainCounter = 2;
2715
      ThreadHandle id;
2716
      pthread_create( &id, NULL, jackStopStream, info );
2717
      return SUCCESS;
2718
    }
2719
    else if ( cbReturnValue == 1 ) {
2720
      handle->drainCounter = 1;
2721
      handle->internalDrain = true;
2722
    }
2723
  }
2724
 
2725
  jack_default_audio_sample_t *jackbuffer;
2726
  unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2727
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2728
 
2729
    if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2730
 
2731
      for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2732
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2733
        memset( jackbuffer, 0, bufferBytes );
2734
      }
2735
 
2736
    }
2737
    else if ( stream_.doConvertBuffer[0] ) {
2738
 
2739
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2740
 
2741
      for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2742
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2743
        memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2744
      }
2745
    }
2746
    else { // no buffer conversion
2747
      for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2748
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2749
        memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2750
      }
2751
    }
2752
  }
2753
 
2754
  // Don't bother draining input
2755
  if ( handle->drainCounter ) {
2756
    handle->drainCounter++;
2757
    goto unlock;
2758
  }
2759
 
2760
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2761
 
2762
    if ( stream_.doConvertBuffer[1] ) {
2763
      for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2764
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2765
        memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2766
      }
2767
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2768
    }
2769
    else { // no buffer conversion
2770
      for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2771
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2772
        memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2773
      }
2774
    }
2775
  }
2776
 
2777
 unlock:
2778
  RtApi::tickStreamTime();
2779
  return SUCCESS;
2780
}
2781
  //******************** End of __UNIX_JACK__ *********************//
2782
#endif
2783
 
2784
#if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2785
 
2786
// The ASIO API is designed around a callback scheme, so this
2787
// implementation is similar to that used for OS-X CoreAudio and Linux
2788
// Jack.  The primary constraint with ASIO is that it only allows
2789
// access to a single driver at a time.  Thus, it is not possible to
2790
// have more than one simultaneous RtAudio stream.
2791
//
2792
// This implementation also requires a number of external ASIO files
2793
// and a few global variables.  The ASIO callback scheme does not
2794
// allow for the passing of user data, so we must create a global
2795
// pointer to our callbackInfo structure.
2796
//
2797
// On unix systems, we make use of a pthread condition variable.
2798
// Since there is no equivalent in Windows, I hacked something based
2799
// on information found in
2800
// http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2801
 
2802
#include "asiosys.h"
2803
#include "asio.h"
2804
#include "iasiothiscallresolver.h"
2805
#include "asiodrivers.h"
2806
#include <cmath>
2807
 
2808
static AsioDrivers drivers;
2809
static ASIOCallbacks asioCallbacks;
2810
static ASIODriverInfo driverInfo;
2811
static CallbackInfo *asioCallbackInfo;
2812
static bool asioXRun;
2813
 
2814
struct AsioHandle {
2815
  int drainCounter;       // Tracks callback counts when draining
2816
  bool internalDrain;     // Indicates if stop is initiated from callback or not.
2817
  ASIOBufferInfo *bufferInfos;
2818
  HANDLE condition;
2819
 
2820
  AsioHandle()
2821
    :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2822
};
2823
 
2824
// Function declarations (definitions at end of section)
2825
static const char* getAsioErrorString( ASIOError result );
2826
static void sampleRateChanged( ASIOSampleRate sRate );
2827
static long asioMessages( long selector, long value, void* message, double* opt );
2828
 
2829
RtApiAsio :: RtApiAsio()
{
  // ASIO cannot run on a multi-threaded apartment. You can call
  // CoInitialize beforehand, but it must be for apartment threading
  // (in which case, CoInitialize will return S_FALSE here).
  coInitialized_ = false;
  HRESULT hr = CoInitialize( NULL );
  if ( FAILED(hr) ) {
    errorText_ = "RtApiAsio::ASIO requires a single-threaded apartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
    error( RtAudioError::WARNING );
  }
  else {
    // FIX: only record a successful initialization.  Previously this flag
    // was set unconditionally, so the destructor called CoUninitialize()
    // without a matching successful CoInitialize() (e.g. after
    // RPC_E_CHANGED_MODE), which COM forbids.
    coInitialized_ = true;
  }

  drivers.removeCurrentDriver();
  driverInfo.asioVersion = 2;

  // See note in DirectSound implementation about GetDesktopWindow().
  driverInfo.sysRef = GetForegroundWindow();
}
2848
 
2849
// Close any still-open stream, then balance the constructor's COM setup.
RtApiAsio :: ~RtApiAsio()
{
  if ( stream_.state != STREAM_CLOSED )
    closeStream();

  if ( coInitialized_ )
    CoUninitialize();
}
2854
 
2855
unsigned int RtApiAsio :: getDeviceCount( void )
2856
{
2857
  return (unsigned int) drivers.asioGetNumDev();
2858
}
2859
 
2860
// We can only load one ASIO driver, so the default output is always the first device.
2861
unsigned int RtApiAsio :: getDefaultOutputDevice( void )
2862
{
2863
  return 0;
2864
}
2865
 
2866
// We can only load one ASIO driver, so the default input is always the first device.
2867
unsigned int RtApiAsio :: getDefaultInputDevice( void )
2868
{
2869
  return 0;
2870
}
2871
 
2872
RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2873
{
2874
  RtAudio::DeviceInfo info;
2875
  info.probed = false;
2876
 
2877
  // Get device ID
2878
  unsigned int nDevices = getDeviceCount();
2879
  if ( nDevices == 0 ) {
2880
    errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2881
    error( RtAudioError::INVALID_USE );
2882
    return info;
2883
  }
2884
 
2885
  if ( device >= nDevices ) {
2886
    errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2887
    error( RtAudioError::INVALID_USE );
2888
    return info;
2889
  }
2890
 
2891
  // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
2892
  if ( stream_.state != STREAM_CLOSED ) {
2893
    if ( device >= devices_.size() ) {
2894
      errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2895
      error( RtAudioError::WARNING );
2896
      return info;
2897
    }
2898
    return devices_[ device ];
2899
  }
2900
 
2901
  char driverName[32];
2902
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2903
  if ( result != ASE_OK ) {
2904
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2905
    errorText_ = errorStream_.str();
2906
    error( RtAudioError::WARNING );
2907
    return info;
2908
  }
2909
 
2910
  info.name = driverName;
2911
 
2912
  if ( !drivers.loadDriver( driverName ) ) {
2913
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2914
    errorText_ = errorStream_.str();
2915
    error( RtAudioError::WARNING );
2916
    return info;
2917
  }
2918
 
2919
  result = ASIOInit( &driverInfo );
2920
  if ( result != ASE_OK ) {
2921
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2922
    errorText_ = errorStream_.str();
2923
    error( RtAudioError::WARNING );
2924
    return info;
2925
  }
2926
 
2927
  // Determine the device channel information.
2928
  long inputChannels, outputChannels;
2929
  result = ASIOGetChannels( &inputChannels, &outputChannels );
2930
  if ( result != ASE_OK ) {
2931
    drivers.removeCurrentDriver();
2932
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2933
    errorText_ = errorStream_.str();
2934
    error( RtAudioError::WARNING );
2935
    return info;
2936
  }
2937
 
2938
  info.outputChannels = outputChannels;
2939
  info.inputChannels = inputChannels;
2940
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
2941
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2942
 
2943
  // Determine the supported sample rates.
2944
  info.sampleRates.clear();
2945
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2946
    result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2947
    if ( result == ASE_OK ) {
2948
      info.sampleRates.push_back( SAMPLE_RATES[i] );
2949
 
2950
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2951
        info.preferredSampleRate = SAMPLE_RATES[i];
2952
    }
2953
  }
2954
 
2955
  // Determine supported data types ... just check first channel and assume rest are the same.
2956
  ASIOChannelInfo channelInfo;
2957
  channelInfo.channel = 0;
2958
  channelInfo.isInput = true;
2959
  if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2960
  result = ASIOGetChannelInfo( &channelInfo );
2961
  if ( result != ASE_OK ) {
2962
    drivers.removeCurrentDriver();
2963
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2964
    errorText_ = errorStream_.str();
2965
    error( RtAudioError::WARNING );
2966
    return info;
2967
  }
2968
 
2969
  info.nativeFormats = 0;
2970
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2971
    info.nativeFormats |= RTAUDIO_SINT16;
2972
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2973
    info.nativeFormats |= RTAUDIO_SINT32;
2974
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2975
    info.nativeFormats |= RTAUDIO_FLOAT32;
2976
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2977
    info.nativeFormats |= RTAUDIO_FLOAT64;
2978
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2979
    info.nativeFormats |= RTAUDIO_SINT24;
2980
 
2981
  if ( info.outputChannels > 0 )
2982
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2983
  if ( info.inputChannels > 0 )
2984
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2985
 
2986
  info.probed = true;
2987
  drivers.removeCurrentDriver();
2988
  return info;
2989
}
2990
 
2991
static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2992
{
2993
  RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2994
  object->callbackEvent( index );
2995
}
2996
 
2997
void RtApiAsio :: saveDeviceInfo( void )
2998
{
2999
  devices_.clear();
3000
 
3001
  unsigned int nDevices = getDeviceCount();
3002
  devices_.resize( nDevices );
3003
  for ( unsigned int i=0; i<nDevices; i++ )
3004
    devices_[i] = getDeviceInfo( i );
3005
}
3006
 
3007
// Open (or extend to duplex) a stream on the given ASIO device and set
// up all per-mode stream_ bookkeeping: channel counts, sample rate,
// device format/byte order, driver buffers, and conversion buffers.
// Returns SUCCESS or FAILURE.  On failure, the "error:" label below
// unwinds any partial setup -- but only for a non-duplex open; when this
// call is the INPUT half of a duplex stream, cleanup is performed by
// RtApi::openStream() instead.
bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
{

  // True when an OUTPUT stream is already open and we are now adding INPUT.
  bool isDuplexInput =  mode == INPUT && stream_.mode == OUTPUT;

  // For ASIO, a duplex stream MUST use the same driver.
  if ( isDuplexInput && stream_.device[0] != device ) {
    errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
    return FAILURE;
  }

  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Only load the driver once for duplex stream.
  if ( !isDuplexInput ) {
    // The getDeviceInfo() function will not work when a stream is open
    // because ASIO does not allow multiple devices to run at the same
    // time.  Thus, we'll probe the system before opening a stream and
    // save the results for use by getDeviceInfo().
    this->saveDeviceInfo();

    if ( !drivers.loadDriver( driverName ) ) {
      errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
      errorText_ = errorStream_.str();
      return FAILURE;
    }

    result = ASIOInit( &driverInfo );
    if ( result != ASE_OK ) {
      errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
      errorText_ = errorStream_.str();
      return FAILURE;
    }
  }

  // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
  // (C++ forbids jumping over the initialization of these locals, hence
  // they are declared before the first "goto error" below).
  bool buffersAllocated = false;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  unsigned int nChannels;


  // Check the device channel count.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    goto error;
  }

  if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
       ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
    errorText_ = errorStream_.str();
    goto error;
  }
  stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = firstChannel;

  // Verify the sample rate is supported.
  result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
    errorText_ = errorStream_.str();
    goto error;
  }

  // Get the current sample rate
  ASIOSampleRate currentRate;
  result = ASIOGetSampleRate( &currentRate );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
    errorText_ = errorStream_.str();
    goto error;
  }

  // Set the sample rate only if necessary
  if ( currentRate != sampleRate ) {
    result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
    if ( result != ASE_OK ) {
      errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
      errorText_ = errorStream_.str();
      goto error;
    }
  }

  // Determine the driver data type.
  // Channel 0 is queried as representative; ASIO drivers report one
  // sample type per direction.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  if ( mode == OUTPUT ) channelInfo.isInput = false;
  else channelInfo.isInput = true;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
    errorText_ = errorStream_.str();
    goto error;
  }

  // Assuming WINDOWS host is always little-endian.
  // MSB sample types therefore require a byte swap on this host.
  stream_.doByteSwap[mode] = false;
  stream_.userFormat = format;
  stream_.deviceFormat[mode] = 0;
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT16;
    if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
  }
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT32;
    if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
  }
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
    if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
  }
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
    if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
  }
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT24;
    if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
  }

  if ( stream_.deviceFormat[mode] == 0 ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
    goto error;
  }

  // Set the buffer size.  For a duplex stream, this will end up
  // setting the buffer size based on the input constraints, which
  // should be ok.
  long minSize, maxSize, preferSize, granularity;
  result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
    errorText_ = errorStream_.str();
    goto error;
  }

  if ( isDuplexInput ) {
    // When this is the duplex input (output was opened before), then we have to use the same
    // buffersize as the output, because it might use the preferred buffer size, which most
    // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
    // So instead of throwing an error, make them equal. The caller uses the reference
    // to the "bufferSize" param as usual to set up processing buffers.

    *bufferSize = stream_.bufferSize;

  } else {
    // Clamp the request into the driver's advertised range, then honor
    // the driver's granularity constraints.
    if ( *bufferSize == 0 ) *bufferSize = preferSize;
    else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
    else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
    else if ( granularity == -1 ) {
      // Make sure bufferSize is a power of two.
      // Find the exponents of the highest set bits of minSize/maxSize,
      // then pick the power of two closest to the requested size.
      int log2_of_min_size = 0;
      int log2_of_max_size = 0;

      for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
        if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
        if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
      }

      long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
      int min_delta_num = log2_of_min_size;

      for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
        long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
        if (current_delta < min_delta) {
          min_delta = current_delta;
          min_delta_num = i;
        }
      }

      *bufferSize = ( (unsigned int)1 << min_delta_num );
      if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
      else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
    }
    else if ( granularity != 0 ) {
      // Set to an even multiple of granularity, rounding up.
      *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
    }
  }

  /*
  // we don't use it anymore, see above!
  // Just left it here for the case...
  if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
    errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
    goto error;
  }
  */

  stream_.bufferSize = *bufferSize;
  stream_.nBuffers = 2;

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;

  // ASIO always uses non-interleaved buffers.
  stream_.deviceInterleaved[mode] = false;

  // Allocate, if necessary, our AsioHandle structure for the stream.
  if ( handle == 0 ) {
    try {
      handle = new AsioHandle;
    }
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
      goto error;
    }
    handle->bufferInfos = 0;

    // Create a manual-reset event.
    // Used to signal stream-drain completion (see callbackEvent/stopStream).
    handle->condition = CreateEvent( NULL,   // no security
                                     TRUE,   // manual-reset
                                     FALSE,  // non-signaled initially
                                     NULL ); // unnamed
    stream_.apiHandle = (void *) handle;
  }

  // Create the ASIO internal buffers.  Since RtAudio sets up input
  // and output separately, we'll have to dispose of previously
  // created output buffers for a duplex stream.
  if ( mode == INPUT && stream_.mode == OUTPUT ) {
    ASIODisposeBuffers();
    if ( handle->bufferInfos ) free( handle->bufferInfos );
  }

  // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
  unsigned int i;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
  if ( handle->bufferInfos == NULL ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    goto error;
  }

  // Output channels first, then input channels, each with its offset applied.
  ASIOBufferInfo *infos;
  infos = handle->bufferInfos;
  for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
    infos->isInput = ASIOFalse;
    infos->channelNum = i + stream_.channelOffset[0];
    infos->buffers[0] = infos->buffers[1] = 0;
  }
  for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
    infos->isInput = ASIOTrue;
    infos->channelNum = i + stream_.channelOffset[1];
    infos->buffers[0] = infos->buffers[1] = 0;
  }

  // prepare for callbacks
  stream_.sampleRate = sampleRate;
  stream_.device[mode] = device;
  stream_.mode = isDuplexInput ? DUPLEX : mode;

  // store this class instance before registering callbacks, that are going to use it
  asioCallbackInfo = &stream_.callbackInfo;
  stream_.callbackInfo.object = (void *) this;

  // Set up the ASIO callback structure and create the ASIO data buffers.
  asioCallbacks.bufferSwitch = &bufferSwitch;
  asioCallbacks.sampleRateDidChange = &sampleRateChanged;
  asioCallbacks.asioMessage = &asioMessages;
  asioCallbacks.bufferSwitchTimeInfo = NULL;
  result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
  if ( result != ASE_OK ) {
    // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
    // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
    // In that case, let's be naïve and try that instead.
    *bufferSize = preferSize;
    stream_.bufferSize = *bufferSize;
    result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
  }

  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
    errorText_ = errorStream_.str();
    goto error;
  }
  buffersAllocated = true;  
  stream_.state = STREAM_STOPPED;

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  // Allocate necessary internal buffers
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
    goto error;
  }

  if ( stream_.doConvertBuffer[mode] ) {

    // For duplex, the existing (output) device buffer may already be
    // large enough to serve as the shared conversion buffer.
    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( isDuplexInput && stream_.deviceBuffer ) {
      unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
      if ( bufferBytes <= bytesOut ) makeBuffer = false;
    }

    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
        goto error;
      }
    }
  }

  // Determine device latencies
  long inputLatency, outputLatency;
  result = ASIOGetLatencies( &inputLatency, &outputLatency );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING); // warn but don't fail
  }
  else {
    stream_.latency[0] = outputLatency;
    stream_.latency[1] = inputLatency;
  }

  // Setup the buffer conversion information structure.  We don't use
  // buffers to do channel offsets, so we override that parameter
  // here.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );

  return SUCCESS;

 error:
  if ( !isDuplexInput ) {
    // the cleanup for error in the duplex input, is done by RtApi::openStream
    // So we clean up for single channel only

    if ( buffersAllocated )
      ASIODisposeBuffers();

    drivers.removeCurrentDriver();

    if ( handle ) {
      CloseHandle( handle->condition );
      if ( handle->bufferInfos )
        free( handle->bufferInfos );

      delete handle;
      stream_.apiHandle = 0;
    }


    if ( stream_.userBuffer[mode] ) {
      free( stream_.userBuffer[mode] );
      stream_.userBuffer[mode] = 0;
    }

    if ( stream_.deviceBuffer ) {
      free( stream_.deviceBuffer );
      stream_.deviceBuffer = 0;
    }
  }

  return FAILURE;
}
3391
 
3392
void RtApiAsio :: closeStream()
3393
{
3394
  if ( stream_.state == STREAM_CLOSED ) {
3395
    errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3396
    error( RtAudioError::WARNING );
3397
    return;
3398
  }
3399
 
3400
  if ( stream_.state == STREAM_RUNNING ) {
3401
    stream_.state = STREAM_STOPPED;
3402
    ASIOStop();
3403
  }
3404
  ASIODisposeBuffers();
3405
  drivers.removeCurrentDriver();
3406
 
3407
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3408
  if ( handle ) {
3409
    CloseHandle( handle->condition );
3410
    if ( handle->bufferInfos )
3411
      free( handle->bufferInfos );
3412
    delete handle;
3413
    stream_.apiHandle = 0;
3414
  }
3415
 
3416
  for ( int i=0; i<2; i++ ) {
3417
    if ( stream_.userBuffer[i] ) {
3418
      free( stream_.userBuffer[i] );
3419
      stream_.userBuffer[i] = 0;
3420
    }
3421
  }
3422
 
3423
  if ( stream_.deviceBuffer ) {
3424
    free( stream_.deviceBuffer );
3425
    stream_.deviceBuffer = 0;
3426
  }
3427
 
3428
  stream_.mode = UNINITIALIZED;
3429
  stream_.state = STREAM_CLOSED;
3430
}
3431
 
3432
// Cleared by RtApiAsio::startStream(); presumably set when a stop-stream
// helper thread has been dispatched (the setter is not visible in this
// part of the file) -- TODO confirm against the full source.
bool stopThreadCalled = false;
3433
 
3434
void RtApiAsio :: startStream()
3435
{
3436
  verifyStream();
3437
  if ( stream_.state == STREAM_RUNNING ) {
3438
    errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3439
    error( RtAudioError::WARNING );
3440
    return;
3441
  }
3442
 
3443
  #if defined( HAVE_GETTIMEOFDAY )
3444
  gettimeofday( &stream_.lastTickTimestamp, NULL );
3445
  #endif
3446
 
3447
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3448
  ASIOError result = ASIOStart();
3449
  if ( result != ASE_OK ) {
3450
    errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3451
    errorText_ = errorStream_.str();
3452
    goto unlock;
3453
  }
3454
 
3455
  handle->drainCounter = 0;
3456
  handle->internalDrain = false;
3457
  ResetEvent( handle->condition );
3458
  stream_.state = STREAM_RUNNING;
3459
  asioXRun = false;
3460
 
3461
 unlock:
3462
  stopThreadCalled = false;
3463
 
3464
  if ( result == ASE_OK ) return;
3465
  error( RtAudioError::SYSTEM_ERROR );
3466
}
3467
 
3468
void RtApiAsio :: stopStream()
3469
{
3470
  verifyStream();
3471
  if ( stream_.state == STREAM_STOPPED ) {
3472
    errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3473
    error( RtAudioError::WARNING );
3474
    return;
3475
  }
3476
 
3477
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3478
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3479
    if ( handle->drainCounter == 0 ) {
3480
      handle->drainCounter = 2;
3481
      WaitForSingleObject( handle->condition, INFINITE );  // block until signaled
3482
    }
3483
  }
3484
 
3485
  stream_.state = STREAM_STOPPED;
3486
 
3487
  ASIOError result = ASIOStop();
3488
  if ( result != ASE_OK ) {
3489
    errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3490
    errorText_ = errorStream_.str();
3491
  }
3492
 
3493
  if ( result == ASE_OK ) return;
3494
  error( RtAudioError::SYSTEM_ERROR );
3495
}
3496
 
3497
void RtApiAsio :: abortStream()
3498
{
3499
  verifyStream();
3500
  if ( stream_.state == STREAM_STOPPED ) {
3501
    errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3502
    error( RtAudioError::WARNING );
3503
    return;
3504
  }
3505
 
3506
  // The following lines were commented-out because some behavior was
3507
  // noted where the device buffers need to be zeroed to avoid
3508
  // continuing sound, even when the device buffers are completely
3509
  // disposed.  So now, calling abort is the same as calling stop.
3510
  // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3511
  // handle->drainCounter = 2;
3512
  stopStream();
3513
}
3514
 
3515
// This function will be called by a spawned thread when the user
3516
// callback function signals that the stream should be stopped or
3517
// aborted.  It is necessary to handle it this way because the
3518
// callbackEvent() function must return before the ASIOStop()
3519
// function will return.
3520
static unsigned __stdcall asioStopStream( void *ptr )
3521
{
3522
  CallbackInfo *info = (CallbackInfo *) ptr;
3523
  RtApiAsio *object = (RtApiAsio *) info->object;
3524
 
3525
  object->stopStream();
3526
  _endthreadex( 0 );
3527
  return 0;
3528
}
3529
 
3530
// Core per-buffer processing, invoked from bufferSwitch() each time the
// driver flips buffer halves.  Runs the user callback, copies/converts
// user data into the non-interleaved ASIO output buffers, and gathers
// ASIO input buffers back into the user buffer.  Also advances the
// drain-counter state machine that implements graceful stream draining:
//   0 = normal operation, 1 = internal drain requested by the callback,
//   2+ = draining (zeros written to output); when the counter exceeds 3
//   the stream is considered drained and a stop is signaled/spawned.
bool RtApiAsio :: callbackEvent( long bufferIndex )
{
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return FAILURE;
  }

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  if ( handle->drainCounter > 3 ) {

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      // stopStream() is blocked on this event; wake it.
      SetEvent( handle->condition );
    else { // spawn a thread to stop the stream
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
    }
    return SUCCESS;
  }

  // Invoke user callback to get fresh output data UNLESS we are
  // draining stream.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report any xrun flagged by the driver (kAsioResyncRequest) as an
    // underflow or overflow depending on the stream direction.
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      asioXRun = false;
    }
    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      asioXRun = false;
    }
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                     stream_.bufferSize, streamTime, status, info->userData );
    // Callback return 2 = abort immediately (spawn stop thread now);
    // return 1 = finish this buffer, then drain internally.
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      unsigned threadId;
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );
      return SUCCESS;
    }
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;
    }
  }

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Bytes per channel per buffer, in the device's sample format.
    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
      }

    }
    else if ( stream_.doConvertBuffer[0] ) {

      // Convert user data to the device format in deviceBuffer, then
      // scatter it channel-by-channel into the ASIO output buffers.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
      }

    }
    else {

      // No conversion needed: copy straight from the user buffer.
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
      }

    }
  }

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
    goto unlock;
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

    }
    else {
      // No conversion: copy each device channel directly into the
      // user buffer, channel-contiguous.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],
                  bufferBytes );
        }
      }

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );
    }
  }

 unlock:
  // The following call was suggested by Malte Clasen.  While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.
  ASIOOutputReady();

  RtApi::tickStreamTime();
  return SUCCESS;
}
3683
 
3684
// Driver notification that the hardware sample rate changed.  The ASIO
// documentation says this usually only happens during external sync;
// audio processing is not stopped by the driver, and the actual rate may
// not even have changed (e.g. only the rate status of an AES/EBU or
// S/PDIF digital input).  We stop the stream and report via stderr.
static void sampleRateChanged( ASIOSampleRate sRate )
{
  RtApi *api = (RtApi *) asioCallbackInfo->object;

  try {
    api->stopStream();
  }
  catch ( RtAudioError &exception ) {
    std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
    return;
  }

  std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
}
3703
 
3704
static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3705
{
3706
  long ret = 0;
3707
 
3708
  switch( selector ) {
3709
  case kAsioSelectorSupported:
3710
    if ( value == kAsioResetRequest
3711
         || value == kAsioEngineVersion
3712
         || value == kAsioResyncRequest
3713
         || value == kAsioLatenciesChanged
3714
         // The following three were added for ASIO 2.0, you don't
3715
         // necessarily have to support them.
3716
         || value == kAsioSupportsTimeInfo
3717
         || value == kAsioSupportsTimeCode
3718
         || value == kAsioSupportsInputMonitor)
3719
      ret = 1L;
3720
    break;
3721
  case kAsioResetRequest:
3722
    // Defer the task and perform the reset of the driver during the
3723
    // next "safe" situation.  You cannot reset the driver right now,
3724
    // as this code is called from the driver.  Reset the driver is
3725
    // done by completely destruct is. I.e. ASIOStop(),
3726
    // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3727
    // driver again.
3728
    std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3729
    ret = 1L;
3730
    break;
3731
  case kAsioResyncRequest:
3732
    // This informs the application that the driver encountered some
3733
    // non-fatal data loss.  It is used for synchronization purposes
3734
    // of different media.  Added mainly to work around the Win16Mutex
3735
    // problems in Windows 95/98 with the Windows Multimedia system,
3736
    // which could lose data because the Mutex was held too long by
3737
    // another thread.  However a driver can issue it in other
3738
    // situations, too.
3739
    // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3740
    asioXRun = true;
3741
    ret = 1L;
3742
    break;
3743
  case kAsioLatenciesChanged:
3744
    // This will inform the host application that the drivers were
3745
    // latencies changed.  Beware, it this does not mean that the
3746
    // buffer sizes have changed!  You might need to update internal
3747
    // delay data.
3748
    std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3749
    ret = 1L;
3750
    break;
3751
  case kAsioEngineVersion:
3752
    // Return the supported ASIO version of the host application.  If
3753
    // a host application does not implement this selector, ASIO 1.0
3754
    // is assumed by the driver.
3755
    ret = 2L;
3756
    break;
3757
  case kAsioSupportsTimeInfo:
3758
    // Informs the driver whether the
3759
    // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3760
    // For compatibility with ASIO 1.0 drivers the host application
3761
    // should always support the "old" bufferSwitch method, too.
3762
    ret = 0;
3763
    break;
3764
  case kAsioSupportsTimeCode:
3765
    // Informs the driver whether application is interested in time
3766
    // code info.  If an application does not need to know about time
3767
    // code, the driver has less work to do.
3768
    ret = 0;
3769
    break;
3770
  }
3771
  return ret;
3772
}
3773
 
3774
static const char* getAsioErrorString( ASIOError result )
3775
{
3776
  struct Messages
3777
  {
3778
    ASIOError value;
3779
    const char*message;
3780
  };
3781
 
3782
  static const Messages m[] =
3783
    {
3784
      {   ASE_NotPresent,    "Hardware input or output is not present or available." },
3785
      {   ASE_HWMalfunction,  "Hardware is malfunctioning." },
3786
      {   ASE_InvalidParameter, "Invalid input parameter." },
3787
      {   ASE_InvalidMode,      "Invalid mode." },
3788
      {   ASE_SPNotAdvancing,     "Sample position not advancing." },
3789
      {   ASE_NoClock,            "Sample clock or rate cannot be determined or is not present." },
3790
      {   ASE_NoMemory,           "Not enough memory to complete the request." }
3791
    };
3792
 
3793
  for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3794
    if ( m[i].value == result ) return m[i].message;
3795
 
3796
  return "Unknown error.";
3797
}
3798
 
3799
//******************** End of __WINDOWS_ASIO__ *********************//
3800
#endif
3801
 
3802
 
3803
#if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3804
 
3805
// Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3806
// - Introduces support for the Windows WASAPI API
3807
// - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3808
// - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3809
// - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3810
 
3811
#ifndef INITGUID
3812
  #define INITGUID
3813
#endif
3814
 
3815
#include <mfapi.h>
3816
#include <mferror.h>
3817
#include <mfplay.h>
3818
#include <mftransform.h>
3819
#include <wmcodecdsp.h>
3820
 
3821
#include <audioclient.h>
3822
#include <avrt.h>
3823
#include <mmdeviceapi.h>
3824
#include <functiondiscoverykeys_devpkey.h>
3825
 
3826
#ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3827
  #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3828
#endif
3829
 
3830
#ifndef MFSTARTUP_NOSOCKET
3831
  #define MFSTARTUP_NOSOCKET 0x1
3832
#endif
3833
 
3834
#ifdef _MSC_VER
3835
  #pragma comment( lib, "ksuser" )
3836
  #pragma comment( lib, "mfplat.lib" )
3837
  #pragma comment( lib, "mfuuid.lib" )
3838
  #pragma comment( lib, "wmcodecdspuuid" )
3839
#endif
3840
 
3841
//=============================================================================
3842
 
3843
#define SAFE_RELEASE( objectPtr )\
3844
if ( objectPtr )\
3845
{\
3846
  objectPtr->Release();\
3847
  objectPtr = NULL;\
3848
}
3849
 
3850
typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3851
 
3852
#ifndef __IAudioClient3_INTERFACE_DEFINED__
3853
MIDL_INTERFACE( "00000000-0000-0000-0000-000000000000" ) IAudioClient3
3854
{
3855
  virtual HRESULT GetSharedModeEnginePeriod( WAVEFORMATEX*, UINT32*, UINT32*, UINT32*, UINT32* ) = 0;
3856
  virtual HRESULT InitializeSharedAudioStream( DWORD, UINT32, WAVEFORMATEX*, LPCGUID ) = 0;
3857
};
3858
#ifdef __CRT_UUID_DECL
3859
__CRT_UUID_DECL( IAudioClient3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
3860
#endif
3861
#endif
3862
 
3863
//-----------------------------------------------------------------------------
3864
 
3865
// WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3866
// Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3867
// requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3868
// provide intermediate storage for read / write synchronization.
3869
class WasapiBuffer
3870
{
3871
public:
3872
  WasapiBuffer()
3873
    : buffer_( NULL ),
3874
      bufferSize_( 0 ),
3875
      inIndex_( 0 ),
3876
      outIndex_( 0 ) {}
3877
 
3878
  ~WasapiBuffer() {
3879
    free( buffer_ );
3880
  }
3881
 
3882
  // sets the length of the internal ring buffer
3883
  void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3884
    free( buffer_ );
3885
 
3886
    buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3887
 
3888
    bufferSize_ = bufferSize;
3889
    inIndex_ = 0;
3890
    outIndex_ = 0;
3891
  }
3892
 
3893
  // attempt to push a buffer into the ring buffer at the current "in" index
3894
  bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3895
  {
3896
    if ( !buffer ||                 // incoming buffer is NULL
3897
         bufferSize == 0 ||         // incoming buffer has no data
3898
         bufferSize > bufferSize_ ) // incoming buffer too large
3899
    {
3900
      return false;
3901
    }
3902
 
3903
    unsigned int relOutIndex = outIndex_;
3904
    unsigned int inIndexEnd = inIndex_ + bufferSize;
3905
    if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3906
      relOutIndex += bufferSize_;
3907
    }
3908
 
3909
    // the "IN" index CAN BEGIN at the "OUT" index
3910
    // the "IN" index CANNOT END at the "OUT" index
3911
    if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3912
      return false; // not enough space between "in" index and "out" index
3913
    }
3914
 
3915
    // copy buffer from external to internal
3916
    int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3917
    fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3918
    int fromInSize = bufferSize - fromZeroSize;
3919
 
3920
    switch( format )
3921
      {
3922
      case RTAUDIO_SINT8:
3923
        memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3924
        memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3925
        break;
3926
      case RTAUDIO_SINT16:
3927
        memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3928
        memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3929
        break;
3930
      case RTAUDIO_SINT24:
3931
        memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3932
        memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3933
        break;
3934
      case RTAUDIO_SINT32:
3935
        memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3936
        memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3937
        break;
3938
      case RTAUDIO_FLOAT32:
3939
        memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3940
        memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3941
        break;
3942
      case RTAUDIO_FLOAT64:
3943
        memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3944
        memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3945
        break;
3946
    }
3947
 
3948
    // update "in" index
3949
    inIndex_ += bufferSize;
3950
    inIndex_ %= bufferSize_;
3951
 
3952
    return true;
3953
  }
3954
 
3955
  // attempt to pull a buffer from the ring buffer from the current "out" index
3956
  bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3957
  {
3958
    if ( !buffer ||                 // incoming buffer is NULL
3959
         bufferSize == 0 ||         // incoming buffer has no data
3960
         bufferSize > bufferSize_ ) // incoming buffer too large
3961
    {
3962
      return false;
3963
    }
3964
 
3965
    unsigned int relInIndex = inIndex_;
3966
    unsigned int outIndexEnd = outIndex_ + bufferSize;
3967
    if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3968
      relInIndex += bufferSize_;
3969
    }
3970
 
3971
    // the "OUT" index CANNOT BEGIN at the "IN" index
3972
    // the "OUT" index CAN END at the "IN" index
3973
    if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3974
      return false; // not enough space between "out" index and "in" index
3975
    }
3976
 
3977
    // copy buffer from internal to external
3978
    int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3979
    fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3980
    int fromOutSize = bufferSize - fromZeroSize;
3981
 
3982
    switch( format )
3983
    {
3984
      case RTAUDIO_SINT8:
3985
        memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3986
        memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3987
        break;
3988
      case RTAUDIO_SINT16:
3989
        memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3990
        memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3991
        break;
3992
      case RTAUDIO_SINT24:
3993
        memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3994
        memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3995
        break;
3996
      case RTAUDIO_SINT32:
3997
        memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3998
        memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3999
        break;
4000
      case RTAUDIO_FLOAT32:
4001
        memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
4002
        memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
4003
        break;
4004
      case RTAUDIO_FLOAT64:
4005
        memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
4006
        memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
4007
        break;
4008
    }
4009
 
4010
    // update "out" index
4011
    outIndex_ += bufferSize;
4012
    outIndex_ %= bufferSize_;
4013
 
4014
    return true;
4015
  }
4016
 
4017
private:
4018
  char* buffer_;
4019
  unsigned int bufferSize_;
4020
  unsigned int inIndex_;
4021
  unsigned int outIndex_;
4022
};
4023
 
4024
//-----------------------------------------------------------------------------
4025
 
4026
// In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
4027
// between HW and the user. The WasapiResampler class is used to perform this conversion between
4028
// HwIn->UserIn and UserOut->HwOut during the stream callback loop.
4029
class WasapiResampler
4030
{
4031
public:
4032
  WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
4033
                   unsigned int inSampleRate, unsigned int outSampleRate )
4034
    : _bytesPerSample( bitsPerSample / 8 )
4035
    , _channelCount( channelCount )
4036
    , _sampleRatio( ( float ) outSampleRate / inSampleRate )
4037
    , _transformUnk( NULL )
4038
    , _transform( NULL )
4039
    , _mediaType( NULL )
4040
    , _inputMediaType( NULL )
4041
    , _outputMediaType( NULL )
4042
 
4043
    #ifdef __IWMResamplerProps_FWD_DEFINED__
4044
      , _resamplerProps( NULL )
4045
    #endif
4046
  {
4047
    // 1. Initialization
4048
 
4049
    MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
4050
 
4051
    // 2. Create Resampler Transform Object
4052
 
4053
    CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
4054
                      IID_IUnknown, ( void** ) &_transformUnk );
4055
 
4056
    _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
4057
 
4058
    #ifdef __IWMResamplerProps_FWD_DEFINED__
4059
      _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
4060
      _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
4061
    #endif
4062
 
4063
    // 3. Specify input / output format
4064
 
4065
    MFCreateMediaType( &_mediaType );
4066
    _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4067
    _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4068
    _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4069
    _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4070
    _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4071
    _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4072
    _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4073
    _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
4074
 
4075
    MFCreateMediaType( &_inputMediaType );
4076
    _mediaType->CopyAllItems( _inputMediaType );
4077
 
4078
    _transform->SetInputType( 0, _inputMediaType, 0 );
4079
 
4080
    MFCreateMediaType( &_outputMediaType );
4081
    _mediaType->CopyAllItems( _outputMediaType );
4082
 
4083
    _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4084
    _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4085
 
4086
    _transform->SetOutputType( 0, _outputMediaType, 0 );
4087
 
4088
    // 4. Send stream start messages to Resampler
4089
 
4090
    _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4091
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4092
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
4093
  }
4094
 
4095
  ~WasapiResampler()
4096
  {
4097
    // 8. Send stream stop messages to Resampler
4098
 
4099
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4100
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4101
 
4102
    // 9. Cleanup
4103
 
4104
    MFShutdown();
4105
 
4106
    SAFE_RELEASE( _transformUnk );
4107
    SAFE_RELEASE( _transform );
4108
    SAFE_RELEASE( _mediaType );
4109
    SAFE_RELEASE( _inputMediaType );
4110
    SAFE_RELEASE( _outputMediaType );
4111
 
4112
    #ifdef __IWMResamplerProps_FWD_DEFINED__
4113
      SAFE_RELEASE( _resamplerProps );
4114
    #endif
4115
  }
4116
 
4117
  void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount, int maxOutSampleCount = -1 )
4118
  {
4119
    unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4120
    if ( _sampleRatio == 1 )
4121
    {
4122
      // no sample rate conversion required
4123
      memcpy( outBuffer, inBuffer, inputBufferSize );
4124
      outSampleCount = inSampleCount;
4125
      return;
4126
    }
4127
 
4128
    unsigned int outputBufferSize = 0;
4129
    if ( maxOutSampleCount != -1 )
4130
    {
4131
      outputBufferSize = _bytesPerSample * _channelCount * maxOutSampleCount;
4132
    }
4133
    else
4134
    {
4135
      outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4136
    }
4137
 
4138
    IMFMediaBuffer* rInBuffer;
4139
    IMFSample* rInSample;
4140
    BYTE* rInByteBuffer = NULL;
4141
 
4142
    // 5. Create Sample object from input data
4143
 
4144
    MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4145
 
4146
    rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4147
    memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4148
    rInBuffer->Unlock();
4149
    rInByteBuffer = NULL;
4150
 
4151
    rInBuffer->SetCurrentLength( inputBufferSize );
4152
 
4153
    MFCreateSample( &rInSample );
4154
    rInSample->AddBuffer( rInBuffer );
4155
 
4156
    // 6. Pass input data to Resampler
4157
 
4158
    _transform->ProcessInput( 0, rInSample, 0 );
4159
 
4160
    SAFE_RELEASE( rInBuffer );
4161
    SAFE_RELEASE( rInSample );
4162
 
4163
    // 7. Perform sample rate conversion
4164
 
4165
    IMFMediaBuffer* rOutBuffer = NULL;
4166
    BYTE* rOutByteBuffer = NULL;
4167
 
4168
    MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4169
    DWORD rStatus;
4170
    DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4171
 
4172
    // 7.1 Create Sample object for output data
4173
 
4174
    memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4175
    MFCreateSample( &( rOutDataBuffer.pSample ) );
4176
    MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4177
    rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4178
    rOutDataBuffer.dwStreamID = 0;
4179
    rOutDataBuffer.dwStatus = 0;
4180
    rOutDataBuffer.pEvents = NULL;
4181
 
4182
    // 7.2 Get output data from Resampler
4183
 
4184
    if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4185
    {
4186
      outSampleCount = 0;
4187
      SAFE_RELEASE( rOutBuffer );
4188
      SAFE_RELEASE( rOutDataBuffer.pSample );
4189
      return;
4190
    }
4191
 
4192
    // 7.3 Write output data to outBuffer
4193
 
4194
    SAFE_RELEASE( rOutBuffer );
4195
    rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4196
    rOutBuffer->GetCurrentLength( &rBytes );
4197
 
4198
    rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4199
    memcpy( outBuffer, rOutByteBuffer, rBytes );
4200
    rOutBuffer->Unlock();
4201
    rOutByteBuffer = NULL;
4202
 
4203
    outSampleCount = rBytes / _bytesPerSample / _channelCount;
4204
    SAFE_RELEASE( rOutBuffer );
4205
    SAFE_RELEASE( rOutDataBuffer.pSample );
4206
  }
4207
 
4208
private:
4209
  unsigned int _bytesPerSample;
4210
  unsigned int _channelCount;
4211
  float _sampleRatio;
4212
 
4213
  IUnknown* _transformUnk;
4214
  IMFTransform* _transform;
4215
  IMFMediaType* _mediaType;
4216
  IMFMediaType* _inputMediaType;
4217
  IMFMediaType* _outputMediaType;
4218
 
4219
  #ifdef __IWMResamplerProps_FWD_DEFINED__
4220
    IWMResamplerProps* _resamplerProps;
4221
  #endif
4222
};
4223
 
4224
//-----------------------------------------------------------------------------
4225
 
4226
// A structure to hold various information related to the WASAPI implementation.
4227
struct WasapiHandle
4228
{
4229
  IAudioClient* captureAudioClient;
4230
  IAudioClient* renderAudioClient;
4231
  IAudioCaptureClient* captureClient;
4232
  IAudioRenderClient* renderClient;
4233
  HANDLE captureEvent;
4234
  HANDLE renderEvent;
4235
 
4236
  WasapiHandle()
4237
  : captureAudioClient( NULL ),
4238
    renderAudioClient( NULL ),
4239
    captureClient( NULL ),
4240
    renderClient( NULL ),
4241
    captureEvent( NULL ),
4242
    renderEvent( NULL ) {}
4243
};
4244
 
4245
//=============================================================================
4246
 
4247
// Initializes COM (apartment or multi-threaded — WASAPI accepts either)
// and creates the device enumerator used by all probing methods.
RtApiWasapi::RtApiWasapi()
  : coInitialized_( false ), deviceEnumerator_( NULL )
{
  // WASAPI can run either apartment or multi-threaded
  HRESULT hr = CoInitialize( NULL );
  coInitialized_ = SUCCEEDED( hr );

  // Instantiate device enumerator
  hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
                         CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
                         ( void** ) &deviceEnumerator_ );

  // If this runs on an old Windows, it will fail. Ignore and proceed.
  if ( FAILED( hr ) )
    deviceEnumerator_ = NULL;
}
4264
 
4265
//-----------------------------------------------------------------------------
4266
 
4267
// Shuts down any open stream, releases the enumerator, and balances the
// CoInitialize() performed in the constructor (when it succeeded).
RtApiWasapi::~RtApiWasapi()
{
  if ( stream_.state != STREAM_CLOSED )
    closeStream();

  SAFE_RELEASE( deviceEnumerator_ );

  // If this object previously called CoInitialize()
  if ( coInitialized_ )
    CoUninitialize();
}
4278
 
4279
//=============================================================================
4280
 
4281
unsigned int RtApiWasapi::getDeviceCount( void )
4282
{
4283
  unsigned int captureDeviceCount = 0;
4284
  unsigned int renderDeviceCount = 0;
4285
 
4286
  IMMDeviceCollection* captureDevices = NULL;
4287
  IMMDeviceCollection* renderDevices = NULL;
4288
 
4289
  if ( !deviceEnumerator_ )
4290
    return 0;
4291
 
4292
  // Count capture devices
4293
  errorText_.clear();
4294
  HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4295
  if ( FAILED( hr ) ) {
4296
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4297
    goto Exit;
4298
  }
4299
 
4300
  hr = captureDevices->GetCount( &captureDeviceCount );
4301
  if ( FAILED( hr ) ) {
4302
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4303
    goto Exit;
4304
  }
4305
 
4306
  // Count render devices
4307
  hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4308
  if ( FAILED( hr ) ) {
4309
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4310
    goto Exit;
4311
  }
4312
 
4313
  hr = renderDevices->GetCount( &renderDeviceCount );
4314
  if ( FAILED( hr ) ) {
4315
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4316
    goto Exit;
4317
  }
4318
 
4319
Exit:
4320
  // release all references
4321
  SAFE_RELEASE( captureDevices );
4322
  SAFE_RELEASE( renderDevices );
4323
 
4324
  if ( errorText_.empty() )
4325
    return captureDeviceCount + renderDeviceCount;
4326
 
4327
  error( RtAudioError::DRIVER_ERROR );
4328
  return 0;
4329
}
4330
 
4331
//-----------------------------------------------------------------------------
4332
 
4333
RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4334
{
4335
  RtAudio::DeviceInfo info;
4336
  unsigned int captureDeviceCount = 0;
4337
  unsigned int renderDeviceCount = 0;
4338
  std::string defaultDeviceName;
4339
  bool isCaptureDevice = false;
4340
 
4341
  PROPVARIANT deviceNameProp;
4342
  PROPVARIANT defaultDeviceNameProp;
4343
 
4344
  IMMDeviceCollection* captureDevices = NULL;
4345
  IMMDeviceCollection* renderDevices = NULL;
4346
  IMMDevice* devicePtr = NULL;
4347
  IMMDevice* defaultDevicePtr = NULL;
4348
  IAudioClient* audioClient = NULL;
4349
  IPropertyStore* devicePropStore = NULL;
4350
  IPropertyStore* defaultDevicePropStore = NULL;
4351
 
4352
  WAVEFORMATEX* deviceFormat = NULL;
4353
  WAVEFORMATEX* closestMatchFormat = NULL;
4354
 
4355
  // probed
4356
  info.probed = false;
4357
 
4358
  // Count capture devices
4359
  errorText_.clear();
4360
  RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4361
  HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4362
  if ( FAILED( hr ) ) {
4363
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4364
    goto Exit;
4365
  }
4366
 
4367
  hr = captureDevices->GetCount( &captureDeviceCount );
4368
  if ( FAILED( hr ) ) {
4369
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4370
    goto Exit;
4371
  }
4372
 
4373
  // Count render devices
4374
  hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4375
  if ( FAILED( hr ) ) {
4376
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4377
    goto Exit;
4378
  }
4379
 
4380
  hr = renderDevices->GetCount( &renderDeviceCount );
4381
  if ( FAILED( hr ) ) {
4382
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4383
    goto Exit;
4384
  }
4385
 
4386
  // validate device index
4387
  if ( device >= captureDeviceCount + renderDeviceCount ) {
4388
    errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4389
    errorType = RtAudioError::INVALID_USE;
4390
    goto Exit;
4391
  }
4392
 
4393
  // determine whether index falls within capture or render devices
4394
  if ( device >= renderDeviceCount ) {
4395
    hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4396
    if ( FAILED( hr ) ) {
4397
      errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4398
      goto Exit;
4399
    }
4400
    isCaptureDevice = true;
4401
  }
4402
  else {
4403
    hr = renderDevices->Item( device, &devicePtr );
4404
    if ( FAILED( hr ) ) {
4405
      errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4406
      goto Exit;
4407
    }
4408
    isCaptureDevice = false;
4409
  }
4410
 
4411
  // get default device name
4412
  if ( isCaptureDevice ) {
4413
    hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4414
    if ( FAILED( hr ) ) {
4415
      errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4416
      goto Exit;
4417
    }
4418
  }
4419
  else {
4420
    hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4421
    if ( FAILED( hr ) ) {
4422
      errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4423
      goto Exit;
4424
    }
4425
  }
4426
 
4427
  hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4428
  if ( FAILED( hr ) ) {
4429
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4430
    goto Exit;
4431
  }
4432
  PropVariantInit( &defaultDeviceNameProp );
4433
 
4434
  hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4435
  if ( FAILED( hr ) ) {
4436
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4437
    goto Exit;
4438
  }
4439
 
4440
  defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4441
 
4442
  // name
4443
  hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4444
  if ( FAILED( hr ) ) {
4445
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4446
    goto Exit;
4447
  }
4448
 
4449
  PropVariantInit( &deviceNameProp );
4450
 
4451
  hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4452
  if ( FAILED( hr ) ) {
4453
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4454
    goto Exit;
4455
  }
4456
 
4457
  info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4458
 
4459
  // is default
4460
  if ( isCaptureDevice ) {
4461
    info.isDefaultInput = info.name == defaultDeviceName;
4462
    info.isDefaultOutput = false;
4463
  }
4464
  else {
4465
    info.isDefaultInput = false;
4466
    info.isDefaultOutput = info.name == defaultDeviceName;
4467
  }
4468
 
4469
  // channel count
4470
  hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4471
  if ( FAILED( hr ) ) {
4472
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4473
    goto Exit;
4474
  }
4475
 
4476
  hr = audioClient->GetMixFormat( &deviceFormat );
4477
  if ( FAILED( hr ) ) {
4478
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4479
    goto Exit;
4480
  }
4481
 
4482
  if ( isCaptureDevice ) {
4483
    info.inputChannels = deviceFormat->nChannels;
4484
    info.outputChannels = 0;
4485
    info.duplexChannels = 0;
4486
  }
4487
  else {
4488
    info.inputChannels = 0;
4489
    info.outputChannels = deviceFormat->nChannels;
4490
    info.duplexChannels = 0;
4491
  }
4492
 
4493
  // sample rates
4494
  info.sampleRates.clear();
4495
 
4496
  // allow support for all sample rates as we have a built-in sample rate converter
4497
  for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4498
    info.sampleRates.push_back( SAMPLE_RATES[i] );
4499
  }
4500
  info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4501
 
4502
  // native format
4503
  info.nativeFormats = 0;
4504
 
4505
  if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4506
       ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4507
         ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4508
  {
4509
    if ( deviceFormat->wBitsPerSample == 32 ) {
4510
      info.nativeFormats |= RTAUDIO_FLOAT32;
4511
    }
4512
    else if ( deviceFormat->wBitsPerSample == 64 ) {
4513
      info.nativeFormats |= RTAUDIO_FLOAT64;
4514
    }
4515
  }
4516
  else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4517
           ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4518
             ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4519
  {
4520
    if ( deviceFormat->wBitsPerSample == 8 ) {
4521
      info.nativeFormats |= RTAUDIO_SINT8;
4522
    }
4523
    else if ( deviceFormat->wBitsPerSample == 16 ) {
4524
      info.nativeFormats |= RTAUDIO_SINT16;
4525
    }
4526
    else if ( deviceFormat->wBitsPerSample == 24 ) {
4527
      info.nativeFormats |= RTAUDIO_SINT24;
4528
    }
4529
    else if ( deviceFormat->wBitsPerSample == 32 ) {
4530
      info.nativeFormats |= RTAUDIO_SINT32;
4531
    }
4532
  }
4533
 
4534
  // probed
4535
  info.probed = true;
4536
 
4537
Exit:
4538
  // release all references
4539
  PropVariantClear( &deviceNameProp );
4540
  PropVariantClear( &defaultDeviceNameProp );
4541
 
4542
  SAFE_RELEASE( captureDevices );
4543
  SAFE_RELEASE( renderDevices );
4544
  SAFE_RELEASE( devicePtr );
4545
  SAFE_RELEASE( defaultDevicePtr );
4546
  SAFE_RELEASE( audioClient );
4547
  SAFE_RELEASE( devicePropStore );
4548
  SAFE_RELEASE( defaultDevicePropStore );
4549
 
4550
  CoTaskMemFree( deviceFormat );
4551
  CoTaskMemFree( closestMatchFormat );
4552
 
4553
  if ( !errorText_.empty() )
4554
    error( errorType );
4555
  return info;
4556
}
4557
 
4558
void RtApiWasapi::closeStream( void )
4559
{
4560
  if ( stream_.state == STREAM_CLOSED ) {
4561
    errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4562
    error( RtAudioError::WARNING );
4563
    return;
4564
  }
4565
 
4566
  if ( stream_.state != STREAM_STOPPED )
4567
    stopStream();
4568
 
4569
  // clean up stream memory
4570
  SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4571
  SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4572
 
4573
  SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4574
  SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4575
 
4576
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4577
    CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4578
 
4579
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4580
    CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4581
 
4582
  delete ( WasapiHandle* ) stream_.apiHandle;
4583
  stream_.apiHandle = NULL;
4584
 
4585
  for ( int i = 0; i < 2; i++ ) {
4586
    if ( stream_.userBuffer[i] ) {
4587
      free( stream_.userBuffer[i] );
4588
      stream_.userBuffer[i] = 0;
4589
    }
4590
  }
4591
 
4592
  if ( stream_.deviceBuffer ) {
4593
    free( stream_.deviceBuffer );
4594
    stream_.deviceBuffer = 0;
4595
  }
4596
 
4597
  // update stream state
4598
  stream_.state = STREAM_CLOSED;
4599
}
4600
 
4601
//-----------------------------------------------------------------------------
4602
 
4603
void RtApiWasapi::startStream( void )
4604
{
4605
  verifyStream();
4606
 
4607
  if ( stream_.state == STREAM_RUNNING ) {
4608
    errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4609
    error( RtAudioError::WARNING );
4610
    return;
4611
  }
4612
 
4613
  #if defined( HAVE_GETTIMEOFDAY )
4614
  gettimeofday( &stream_.lastTickTimestamp, NULL );
4615
  #endif
4616
 
4617
  // update stream state
4618
  stream_.state = STREAM_RUNNING;
4619
 
4620
  // create WASAPI stream thread
4621
  stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4622
 
4623
  if ( !stream_.callbackInfo.thread ) {
4624
    errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4625
    error( RtAudioError::THREAD_ERROR );
4626
  }
4627
  else {
4628
    SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4629
    ResumeThread( ( void* ) stream_.callbackInfo.thread );
4630
  }
4631
}
4632
 
4633
//-----------------------------------------------------------------------------
4634
 
4635
void RtApiWasapi::stopStream( void )
4636
{
4637
  verifyStream();
4638
 
4639
  if ( stream_.state == STREAM_STOPPED ) {
4640
    errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4641
    error( RtAudioError::WARNING );
4642
    return;
4643
  }
4644
  if ( stream_.state == STREAM_STOPPING ) {
4645
    errorText_ = "RtApiWasapi::stopStream: The stream is already stopping.";
4646
    error( RtAudioError::WARNING );
4647
    return;
4648
  }
4649
 
4650
  // inform stream thread by setting stream state to STREAM_STOPPING
4651
  stream_.state = STREAM_STOPPING;
4652
 
4653
  WaitForSingleObject( ( void* ) stream_.callbackInfo.thread, INFINITE );
4654
 
4655
  // close thread handle
4656
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4657
    errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4658
    error( RtAudioError::THREAD_ERROR );
4659
    return;
4660
  }
4661
 
4662
  stream_.callbackInfo.thread = (ThreadHandle) NULL;
4663
}
4664
 
4665
//-----------------------------------------------------------------------------
4666
 
4667
void RtApiWasapi::abortStream( void )
4668
{
4669
  verifyStream();
4670
 
4671
  if ( stream_.state == STREAM_STOPPED ) {
4672
    errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4673
    error( RtAudioError::WARNING );
4674
    return;
4675
  }
4676
  if ( stream_.state == STREAM_STOPPING ) {
4677
    errorText_ = "RtApiWasapi::abortStream: The stream is already stopping.";
4678
    error( RtAudioError::WARNING );
4679
    return;
4680
  }
4681
 
4682
  // inform stream thread by setting stream state to STREAM_STOPPING
4683
  stream_.state = STREAM_STOPPING;
4684
 
4685
  WaitForSingleObject( ( void* ) stream_.callbackInfo.thread, INFINITE );
4686
 
4687
  // close thread handle
4688
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4689
    errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4690
    error( RtAudioError::THREAD_ERROR );
4691
    return;
4692
  }
4693
 
4694
  stream_.callbackInfo.thread = (ThreadHandle) NULL;
4695
}
4696
 
4697
//-----------------------------------------------------------------------------
4698
 
4699
//! Open/configure one direction (INPUT or OUTPUT) of a WASAPI stream.
/*!
    Enumerates the active capture and render endpoints, activates an
    IAudioClient for the requested device/mode, and fills in the
    stream_ bookkeeping (channels, format, latency, conversion flags,
    user buffer).  The global device index space is render devices
    first [0, renderDeviceCount), then capture devices.

    All failure paths jump to Exit:, which releases the COM objects,
    closes the stream on FAILURE, and reports any pending error.

    \param device       global device index (render devices first).
    \param mode         INPUT or OUTPUT.
    \param channels     number of user channels requested.
    \param firstChannel offset of the first user channel.
    \param sampleRate   user sample rate (resampled to the device mix rate).
    \param format       user sample format.
    \param bufferSize   in: requested frames per buffer (not adjusted here).
    \param options      optional stream options (interleaving, priority).
    \return SUCCESS or FAILURE.
*/
bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int* bufferSize,
                                   RtAudio::StreamOptions* options )
{
  bool methodResult = FAILURE;
  unsigned int captureDeviceCount = 0;
  unsigned int renderDeviceCount = 0;

  // COM objects released at Exit: regardless of success or failure
  IMMDeviceCollection* captureDevices = NULL;
  IMMDeviceCollection* renderDevices = NULL;
  IMMDevice* devicePtr = NULL;
  WAVEFORMATEX* deviceFormat = NULL;     // freed with CoTaskMemFree at Exit:
  unsigned int bufferBytes;
  stream_.state = STREAM_STOPPED;

  // create API Handle if not already created (shared between both directions
  // of a duplex stream; deleted in closeStream())
  if ( !stream_.apiHandle )
    stream_.apiHandle = ( void* ) new WasapiHandle();

  // Count capture devices
  errorText_.clear();
  RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
  HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
    goto Exit;
  }

  hr = captureDevices->GetCount( &captureDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
    goto Exit;
  }

  // Count render devices
  hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
    goto Exit;
  }

  hr = renderDevices->GetCount( &renderDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
    goto Exit;
  }

  // validate device index against the combined render+capture index space
  if ( device >= captureDeviceCount + renderDeviceCount ) {
    errorType = RtAudioError::INVALID_USE;
    errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
    goto Exit;
  }

  // if device index falls within capture devices (indices above the render range)
  if ( device >= renderDeviceCount ) {
    if ( mode != INPUT ) {
      errorType = RtAudioError::INVALID_USE;
      errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
      goto Exit;
    }

    // retrieve captureAudioClient from devicePtr (stored by reference directly
    // into the shared WasapiHandle)
    IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

    hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
      goto Exit;
    }

    hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                              NULL, ( void** ) &captureAudioClient );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
      goto Exit;
    }

    hr = captureAudioClient->GetMixFormat( &deviceFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
      goto Exit;
    }

    stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
    captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
  }

  // if device index falls within render devices and is configured for loopback
  // (i.e. capturing what a render endpoint is playing)
  if ( device < renderDeviceCount && mode == INPUT )
  {
    // if renderAudioClient is not initialised, initialise it now
    IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
    if ( !renderAudioClient )
    {
      // NOTE(review): the result of this recursive call is ignored — if it
      // fails, the code below proceeds with a null renderAudioClient.
      // Confirm against upstream RtAudio whether this is intentional.
      probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
    }

    // retrieve captureAudioClient from devicePtr — for loopback the *capture*
    // client is activated on the *render* endpoint
    IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

    hr = renderDevices->Item( device, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
      goto Exit;
    }

    hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                              NULL, ( void** ) &captureAudioClient );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
      goto Exit;
    }

    hr = captureAudioClient->GetMixFormat( &deviceFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
      goto Exit;
    }

    stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
    captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
  }

  // if device index falls within render devices and is configured for output
  if ( device < renderDeviceCount && mode == OUTPUT )
  {
    // if renderAudioClient is already initialised, don't initialise it again
    // (it may have been set up by the loopback recursion above)
    IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
    if ( renderAudioClient )
    {
      methodResult = SUCCESS;
      goto Exit;
    }

    hr = renderDevices->Item( device, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
      goto Exit;
    }

    hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                              NULL, ( void** ) &renderAudioClient );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
      goto Exit;
    }

    hr = renderAudioClient->GetMixFormat( &deviceFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
      goto Exit;
    }

    stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
    renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
  }

  // fill stream data: opening the second direction of a stream promotes it
  // to DUPLEX
  if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
       ( stream_.mode == INPUT && mode == OUTPUT ) ) {
    stream_.mode = DUPLEX;
  }
  else {
    stream_.mode = mode;
  }

  stream_.device[mode] = device;
  stream_.doByteSwap[mode] = false;      // WASAPI is always native-endian
  stream_.sampleRate = sampleRate;
  stream_.bufferSize = *bufferSize;
  stream_.nBuffers = 1;
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = firstChannel;
  stream_.userFormat = format;
  stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
    stream_.userInterleaved = false;
  else
    stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;

  // Set flags for buffer conversion (format, channel count, or interleaving
  // mismatches between the user side and the device side).
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] ||
       stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
       stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
    stream_.doConvertBuffer[mode] = true;
  else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
            stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  if ( stream_.doConvertBuffer[mode] )
    setConvertInfo( mode, firstChannel );

  // Allocate necessary internal buffers (user-side buffer; the device-side
  // buffer is allocated later in wasapiThread())
  bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );

  stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
  if ( !stream_.userBuffer[mode] ) {
    errorType = RtAudioError::MEMORY_ERROR;
    errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
    goto Exit;
  }

  // priority 15 corresponds to THREAD_PRIORITY_TIME_CRITICAL
  if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
    stream_.callbackInfo.priority = 15;
  else
    stream_.callbackInfo.priority = 0;

  ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
  ///! TODO: RTAUDIO_HOG_DEVICE       // Exclusive mode

  methodResult = SUCCESS;

Exit:
  // clean up COM objects on every path (SAFE_RELEASE tolerates NULL)
  SAFE_RELEASE( captureDevices );
  SAFE_RELEASE( renderDevices );
  SAFE_RELEASE( devicePtr );
  CoTaskMemFree( deviceFormat );

  // if method failed, close the stream (releases the WasapiHandle and any
  // buffers allocated above)
  if ( methodResult == FAILURE )
    closeStream();

  if ( !errorText_.empty() )
    error( errorType );
  return methodResult;
}
4931
 
4932
//=============================================================================
4933
 
4934
//! Thread entry point: forwards to the instance's processing loop.
DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
{
  RtApiWasapi* instance = static_cast<RtApiWasapi*>( wasapiPtr );
  if ( instance != NULL )
    instance->wasapiThread();

  return 0;
}
4941
 
4942
//! Helper thread entry point: stops the stream on behalf of the callback.
DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
{
  RtApiWasapi* instance = static_cast<RtApiWasapi*>( wasapiPtr );
  if ( instance != NULL )
    instance->stopStream();

  return 0;
}
4949
 
4950
//! Helper thread entry point: aborts the stream on behalf of the callback.
DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
{
  RtApiWasapi* instance = static_cast<RtApiWasapi*>( wasapiPtr );
  if ( instance != NULL )
    instance->abortStream();

  return 0;
}
4957
 
4958
//-----------------------------------------------------------------------------
4959
 
4960
void RtApiWasapi::wasapiThread()
4961
{
4962
  // as this is a new thread, we must CoInitialize it
4963
  CoInitialize( NULL );
4964
 
4965
  HRESULT hr;
4966
 
4967
  IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4968
  IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4969
  IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4970
  IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4971
  HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4972
  HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4973
 
4974
  WAVEFORMATEX* captureFormat = NULL;
4975
  WAVEFORMATEX* renderFormat = NULL;
4976
  float captureSrRatio = 0.0f;
4977
  float renderSrRatio = 0.0f;
4978
  WasapiBuffer captureBuffer;
4979
  WasapiBuffer renderBuffer;
4980
  WasapiResampler* captureResampler = NULL;
4981
  WasapiResampler* renderResampler = NULL;
4982
 
4983
  // declare local stream variables
4984
  RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4985
  BYTE* streamBuffer = NULL;
4986
  DWORD captureFlags = 0;
4987
  unsigned int bufferFrameCount = 0;
4988
  unsigned int numFramesPadding = 0;
4989
  unsigned int convBufferSize = 0;
4990
  bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4991
  bool callbackPushed = true;
4992
  bool callbackPulled = false;
4993
  bool callbackStopped = false;
4994
  int callbackResult = 0;
4995
 
4996
  // convBuffer is used to store converted buffers between WASAPI and the user
4997
  char* convBuffer = NULL;
4998
  unsigned int convBuffSize = 0;
4999
  unsigned int deviceBuffSize = 0;
5000
 
5001
  std::string errorText;
5002
  RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
5003
 
5004
  // Attempt to assign "Pro Audio" characteristic to thread
5005
  HMODULE AvrtDll = LoadLibraryW( L"AVRT.dll" );
5006
  if ( AvrtDll ) {
5007
    DWORD taskIndex = 0;
5008
    TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
5009
      ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
5010
    AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
5011
    FreeLibrary( AvrtDll );
5012
  }
5013
 
5014
  // start capture stream if applicable
5015
  if ( captureAudioClient ) {
5016
    hr = captureAudioClient->GetMixFormat( &captureFormat );
5017
    if ( FAILED( hr ) ) {
5018
      errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5019
      goto Exit;
5020
    }
5021
 
5022
    // init captureResampler
5023
    captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5024
                                            formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5025
                                            captureFormat->nSamplesPerSec, stream_.sampleRate );
5026
 
5027
    captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
5028
 
5029
    if ( !captureClient ) {
5030
      IAudioClient3* captureAudioClient3 = nullptr;
5031
      captureAudioClient->QueryInterface( __uuidof( IAudioClient3 ), ( void** ) &captureAudioClient3 );
5032
      if ( captureAudioClient3 && !loopbackEnabled )
5033
      {
5034
        UINT32 Ignore;
5035
        UINT32 MinPeriodInFrames;
5036
        hr = captureAudioClient3->GetSharedModeEnginePeriod( captureFormat,
5037
                                                             &Ignore,
5038
                                                             &Ignore,
5039
                                                             &MinPeriodInFrames,
5040
                                                             &Ignore );
5041
        if ( FAILED( hr ) ) {
5042
          errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5043
          goto Exit;
5044
        }
5045
 
5046
        hr = captureAudioClient3->InitializeSharedAudioStream( AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5047
                                                               MinPeriodInFrames,
5048
                                                               captureFormat,
5049
                                                               NULL );
5050
      }
5051
      else
5052
      {
5053
        hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5054
                                             loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5055
                                             0,
5056
                                             0,
5057
                                             captureFormat,
5058
                                             NULL );
5059
      }
5060
 
5061
      if ( FAILED( hr ) ) {
5062
        errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5063
        goto Exit;
5064
      }
5065
 
5066
      hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5067
                                           ( void** ) &captureClient );
5068
      if ( FAILED( hr ) ) {
5069
        errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5070
        goto Exit;
5071
      }
5072
 
5073
      // don't configure captureEvent if in loopback mode
5074
      if ( !loopbackEnabled )
5075
      {
5076
        // configure captureEvent to trigger on every available capture buffer
5077
        captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5078
        if ( !captureEvent ) {
5079
          errorType = RtAudioError::SYSTEM_ERROR;
5080
          errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5081
          goto Exit;
5082
        }
5083
 
5084
        hr = captureAudioClient->SetEventHandle( captureEvent );
5085
        if ( FAILED( hr ) ) {
5086
          errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
5087
          goto Exit;
5088
        }
5089
 
5090
        ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5091
      }
5092
 
5093
      ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5094
 
5095
      // reset the capture stream
5096
      hr = captureAudioClient->Reset();
5097
      if ( FAILED( hr ) ) {
5098
        errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5099
        goto Exit;
5100
      }
5101
 
5102
      // start the capture stream
5103
      hr = captureAudioClient->Start();
5104
      if ( FAILED( hr ) ) {
5105
        errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5106
        goto Exit;
5107
      }
5108
    }
5109
 
5110
    unsigned int inBufferSize = 0;
5111
    hr = captureAudioClient->GetBufferSize( &inBufferSize );
5112
    if ( FAILED( hr ) ) {
5113
      errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5114
      goto Exit;
5115
    }
5116
 
5117
    // scale outBufferSize according to stream->user sample rate ratio
5118
    unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5119
    inBufferSize *= stream_.nDeviceChannels[INPUT];
5120
 
5121
    // set captureBuffer size
5122
    captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5123
  }
5124
 
5125
  // start render stream if applicable
5126
  if ( renderAudioClient ) {
5127
    hr = renderAudioClient->GetMixFormat( &renderFormat );
5128
    if ( FAILED( hr ) ) {
5129
      errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5130
      goto Exit;
5131
    }
5132
 
5133
    // init renderResampler
5134
    renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5135
                                           formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5136
                                           stream_.sampleRate, renderFormat->nSamplesPerSec );
5137
 
5138
    renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5139
 
5140
    if ( !renderClient ) {
5141
      IAudioClient3* renderAudioClient3 = nullptr;
5142
      renderAudioClient->QueryInterface( __uuidof( IAudioClient3 ), ( void** ) &renderAudioClient3 );
5143
      if ( renderAudioClient3 )
5144
      {
5145
        UINT32 Ignore;
5146
        UINT32 MinPeriodInFrames;
5147
        hr = renderAudioClient3->GetSharedModeEnginePeriod( renderFormat,
5148
                                                            &Ignore,
5149
                                                            &Ignore,
5150
                                                            &MinPeriodInFrames,
5151
                                                            &Ignore );
5152
        if ( FAILED( hr ) ) {
5153
          errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5154
          goto Exit;
5155
        }
5156
 
5157
        hr = renderAudioClient3->InitializeSharedAudioStream( AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5158
                                                              MinPeriodInFrames,
5159
                                                              renderFormat,
5160
                                                              NULL );
5161
      }
5162
      else
5163
      {
5164
        hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5165
                                            AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5166
                                            0,
5167
                                            0,
5168
                                            renderFormat,
5169
                                            NULL );
5170
      }
5171
 
5172
      if ( FAILED( hr ) ) {
5173
        errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5174
        goto Exit;
5175
      }
5176
 
5177
      hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5178
                                          ( void** ) &renderClient );
5179
      if ( FAILED( hr ) ) {
5180
        errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5181
        goto Exit;
5182
      }
5183
 
5184
      // configure renderEvent to trigger on every available render buffer
5185
      renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5186
      if ( !renderEvent ) {
5187
        errorType = RtAudioError::SYSTEM_ERROR;
5188
        errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5189
        goto Exit;
5190
      }
5191
 
5192
      hr = renderAudioClient->SetEventHandle( renderEvent );
5193
      if ( FAILED( hr ) ) {
5194
        errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5195
        goto Exit;
5196
      }
5197
 
5198
      ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5199
      ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5200
 
5201
      // reset the render stream
5202
      hr = renderAudioClient->Reset();
5203
      if ( FAILED( hr ) ) {
5204
        errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5205
        goto Exit;
5206
      }
5207
 
5208
      // start the render stream
5209
      hr = renderAudioClient->Start();
5210
      if ( FAILED( hr ) ) {
5211
        errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5212
        goto Exit;
5213
      }
5214
    }
5215
 
5216
    unsigned int outBufferSize = 0;
5217
    hr = renderAudioClient->GetBufferSize( &outBufferSize );
5218
    if ( FAILED( hr ) ) {
5219
      errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5220
      goto Exit;
5221
    }
5222
 
5223
    // scale inBufferSize according to user->stream sample rate ratio
5224
    unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5225
    outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5226
 
5227
    // set renderBuffer size
5228
    renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5229
  }
5230
 
5231
  // malloc buffer memory
5232
  if ( stream_.mode == INPUT )
5233
  {
5234
    using namespace std; // for ceilf
5235
    convBuffSize = ( unsigned int ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5236
    deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5237
  }
5238
  else if ( stream_.mode == OUTPUT )
5239
  {
5240
    convBuffSize = ( unsigned int ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5241
    deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5242
  }
5243
  else if ( stream_.mode == DUPLEX )
5244
  {
5245
    convBuffSize = std::max( ( unsigned int ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5246
                             ( unsigned int ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5247
    deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5248
                               stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5249
  }
5250
 
5251
  convBuffSize *= 2; // allow overflow for *SrRatio remainders
5252
  convBuffer = ( char* ) calloc( convBuffSize, 1 );
5253
  stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5254
  if ( !convBuffer || !stream_.deviceBuffer ) {
5255
    errorType = RtAudioError::MEMORY_ERROR;
5256
    errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5257
    goto Exit;
5258
  }
5259
 
5260
  // stream process loop
5261
  while ( stream_.state != STREAM_STOPPING ) {
5262
    if ( !callbackPulled ) {
5263
      // Callback Input
5264
      // ==============
5265
      // 1. Pull callback buffer from inputBuffer
5266
      // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5267
      //                          Convert callback buffer to user format
5268
 
5269
      if ( captureAudioClient )
5270
      {
5271
        int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5272
 
5273
        convBufferSize = 0;
5274
        while ( convBufferSize < stream_.bufferSize )
5275
        {
5276
          // Pull callback buffer from inputBuffer
5277
          callbackPulled = captureBuffer.pullBuffer( convBuffer,
5278
                                                     samplesToPull * stream_.nDeviceChannels[INPUT],
5279
                                                     stream_.deviceFormat[INPUT] );
5280
 
5281
          if ( !callbackPulled )
5282
          {
5283
            break;
5284
          }
5285
 
5286
          // Convert callback buffer to user sample rate
5287
          unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5288
          unsigned int convSamples = 0;
5289
 
5290
          captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5291
                                     convBuffer,
5292
                                     samplesToPull,
5293
                                     convSamples,
5294
                                     convBufferSize == 0 ? -1 : stream_.bufferSize - convBufferSize );
5295
 
5296
          convBufferSize += convSamples;
5297
          samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5298
        }
5299
 
5300
        if ( callbackPulled )
5301
        {
5302
          if ( stream_.doConvertBuffer[INPUT] ) {
5303
            // Convert callback buffer to user format
5304
            convertBuffer( stream_.userBuffer[INPUT],
5305
                           stream_.deviceBuffer,
5306
                           stream_.convertInfo[INPUT] );
5307
          }
5308
          else {
5309
            // no further conversion, simple copy deviceBuffer to userBuffer
5310
            memcpy( stream_.userBuffer[INPUT],
5311
                    stream_.deviceBuffer,
5312
                    stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5313
          }
5314
        }
5315
      }
5316
      else {
5317
        // if there is no capture stream, set callbackPulled flag
5318
        callbackPulled = true;
5319
      }
5320
 
5321
      // Execute Callback
5322
      // ================
5323
      // 1. Execute user callback method
5324
      // 2. Handle return value from callback
5325
 
5326
      // if callback has not requested the stream to stop
5327
      if ( callbackPulled && !callbackStopped ) {
5328
        // Execute user callback method
5329
        callbackResult = callback( stream_.userBuffer[OUTPUT],
5330
                                   stream_.userBuffer[INPUT],
5331
                                   stream_.bufferSize,
5332
                                   getStreamTime(),
5333
                                   captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5334
                                   stream_.callbackInfo.userData );
5335
 
5336
        // tick stream time
5337
        RtApi::tickStreamTime();
5338
 
5339
        // Handle return value from callback
5340
        if ( callbackResult == 1 ) {
5341
          // instantiate a thread to stop this thread
5342
          HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5343
          if ( !threadHandle ) {
5344
            errorType = RtAudioError::THREAD_ERROR;
5345
            errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5346
            goto Exit;
5347
          }
5348
          else if ( !CloseHandle( threadHandle ) ) {
5349
            errorType = RtAudioError::THREAD_ERROR;
5350
            errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5351
            goto Exit;
5352
          }
5353
 
5354
          callbackStopped = true;
5355
        }
5356
        else if ( callbackResult == 2 ) {
5357
          // instantiate a thread to stop this thread
5358
          HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5359
          if ( !threadHandle ) {
5360
            errorType = RtAudioError::THREAD_ERROR;
5361
            errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5362
            goto Exit;
5363
          }
5364
          else if ( !CloseHandle( threadHandle ) ) {
5365
            errorType = RtAudioError::THREAD_ERROR;
5366
            errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5367
            goto Exit;
5368
          }
5369
 
5370
          callbackStopped = true;
5371
        }
5372
      }
5373
    }
5374
 
5375
    // Callback Output
5376
    // ===============
5377
    // 1. Convert callback buffer to stream format
5378
    // 2. Convert callback buffer to stream sample rate and channel count
5379
    // 3. Push callback buffer into outputBuffer
5380
 
5381
    if ( renderAudioClient && callbackPulled )
5382
    {
5383
      // if the last call to renderBuffer.PushBuffer() was successful
5384
      if ( callbackPushed || convBufferSize == 0 )
5385
      {
5386
        if ( stream_.doConvertBuffer[OUTPUT] )
5387
        {
5388
          // Convert callback buffer to stream format
5389
          convertBuffer( stream_.deviceBuffer,
5390
                         stream_.userBuffer[OUTPUT],
5391
                         stream_.convertInfo[OUTPUT] );
5392
 
5393
        }
5394
        else {
5395
          // no further conversion, simple copy userBuffer to deviceBuffer
5396
          memcpy( stream_.deviceBuffer,
5397
                  stream_.userBuffer[OUTPUT],
5398
                  stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5399
        }
5400
 
5401
        // Convert callback buffer to stream sample rate
5402
        renderResampler->Convert( convBuffer,
5403
                                  stream_.deviceBuffer,
5404
                                  stream_.bufferSize,
5405
                                  convBufferSize );
5406
      }
5407
 
5408
      // Push callback buffer into outputBuffer
5409
      callbackPushed = renderBuffer.pushBuffer( convBuffer,
5410
                                                convBufferSize * stream_.nDeviceChannels[OUTPUT],
5411
                                                stream_.deviceFormat[OUTPUT] );
5412
    }
5413
    else {
5414
      // if there is no render stream, set callbackPushed flag
5415
      callbackPushed = true;
5416
    }
5417
 
5418
    // Stream Capture
5419
    // ==============
5420
    // 1. Get capture buffer from stream
5421
    // 2. Push capture buffer into inputBuffer
5422
    // 3. If 2. was successful: Release capture buffer
5423
 
5424
    if ( captureAudioClient ) {
5425
      // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5426
      if ( !callbackPulled ) {
5427
        WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5428
      }
5429
 
5430
      // Get capture buffer from stream
5431
      hr = captureClient->GetBuffer( &streamBuffer,
5432
                                     &bufferFrameCount,
5433
                                     &captureFlags, NULL, NULL );
5434
      if ( FAILED( hr ) ) {
5435
        errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5436
        goto Exit;
5437
      }
5438
 
5439
      if ( bufferFrameCount != 0 ) {
5440
        // Push capture buffer into inputBuffer
5441
        if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5442
                                       bufferFrameCount * stream_.nDeviceChannels[INPUT],
5443
                                       stream_.deviceFormat[INPUT] ) )
5444
        {
5445
          // Release capture buffer
5446
          hr = captureClient->ReleaseBuffer( bufferFrameCount );
5447
          if ( FAILED( hr ) ) {
5448
            errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5449
            goto Exit;
5450
          }
5451
        }
5452
        else
5453
        {
5454
          // Inform WASAPI that capture was unsuccessful
5455
          hr = captureClient->ReleaseBuffer( 0 );
5456
          if ( FAILED( hr ) ) {
5457
            errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5458
            goto Exit;
5459
          }
5460
        }
5461
      }
5462
      else
5463
      {
5464
        // Inform WASAPI that capture was unsuccessful
5465
        hr = captureClient->ReleaseBuffer( 0 );
5466
        if ( FAILED( hr ) ) {
5467
          errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5468
          goto Exit;
5469
        }
5470
      }
5471
    }
5472
 
5473
    // Stream Render
5474
    // =============
5475
    // 1. Get render buffer from stream
5476
    // 2. Pull next buffer from outputBuffer
5477
    // 3. If 2. was successful: Fill render buffer with next buffer
5478
    //                          Release render buffer
5479
 
5480
    if ( renderAudioClient ) {
5481
      // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5482
      if ( callbackPulled && !callbackPushed ) {
5483
        WaitForSingleObject( renderEvent, INFINITE );
5484
      }
5485
 
5486
      // Get render buffer from stream
5487
      hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5488
      if ( FAILED( hr ) ) {
5489
        errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5490
        goto Exit;
5491
      }
5492
 
5493
      hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5494
      if ( FAILED( hr ) ) {
5495
        errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5496
        goto Exit;
5497
      }
5498
 
5499
      bufferFrameCount -= numFramesPadding;
5500
 
5501
      if ( bufferFrameCount != 0 ) {
5502
        hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5503
        if ( FAILED( hr ) ) {
5504
          errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5505
          goto Exit;
5506
        }
5507
 
5508
        // Pull next buffer from outputBuffer
5509
        // Fill render buffer with next buffer
5510
        if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5511
                                      bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5512
                                      stream_.deviceFormat[OUTPUT] ) )
5513
        {
5514
          // Release render buffer
5515
          hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5516
          if ( FAILED( hr ) ) {
5517
            errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5518
            goto Exit;
5519
          }
5520
        }
5521
        else
5522
        {
5523
          // Inform WASAPI that render was unsuccessful
5524
          hr = renderClient->ReleaseBuffer( 0, 0 );
5525
          if ( FAILED( hr ) ) {
5526
            errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5527
            goto Exit;
5528
          }
5529
        }
5530
      }
5531
      else
5532
      {
5533
        // Inform WASAPI that render was unsuccessful
5534
        hr = renderClient->ReleaseBuffer( 0, 0 );
5535
        if ( FAILED( hr ) ) {
5536
          errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5537
          goto Exit;
5538
        }
5539
      }
5540
    }
5541
 
5542
    // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5543
    if ( callbackPushed ) {
5544
      // unsetting the callbackPulled flag lets the stream know that
5545
      // the audio device is ready for another callback output buffer.
5546
      callbackPulled = false;
5547
    }
5548
 
5549
  }
5550
 
5551
Exit:
5552
  // clean up
5553
  CoTaskMemFree( captureFormat );
5554
  CoTaskMemFree( renderFormat );
5555
 
5556
  free ( convBuffer );
5557
  delete renderResampler;
5558
  delete captureResampler;
5559
 
5560
  CoUninitialize();
5561
 
5562
  if ( !errorText.empty() )
5563
  {
5564
    errorText_ = errorText;
5565
    error( errorType );
5566
  }
5567
 
5568
  // update stream state
5569
  stream_.state = STREAM_STOPPED;
5570
}
5571
 
5572
//******************** End of __WINDOWS_WASAPI__ *********************//
5573
#endif
5574
 
5575
 
5576
#if defined(__WINDOWS_DS__) // Windows DirectSound API
5577
 
5578
// Modified by Robin Davies, October 2005
5579
// - Improvements to DirectX pointer chasing. 
5580
// - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5581
// - Auto-call CoInitialize for DSOUND and ASIO platforms.
5582
// Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5583
// Changed device query structure for RtAudio 4.0.7, January 2010
5584
 
5585
#include <windows.h>
5586
#include <process.h>
5587
#include <mmsystem.h>
5588
#include <mmreg.h>
5589
#include <dsound.h>
5590
#include <assert.h>
5591
#include <algorithm>
5592
 
5593
#if defined(__MINGW32__)
5594
  // missing from latest mingw winapi
5595
#define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5596
#define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5597
#define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5598
#define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5599
#endif
5600
 
5601
#define MINIMUM_DEVICE_BUFFER_SIZE 32768
5602
 
5603
#ifdef _MSC_VER // if Microsoft Visual C++
5604
#pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5605
#endif
5606
 
5607
// Returns nonzero when 'pointer' lies inside the circular-buffer window
// [earlierPointer, laterPointer).  All three positions are unwrapped
// modulo 'bufferSize' first so the test works across the wrap-around.
static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
{
  // Normalize a probe position that has run past the end of the buffer.
  if ( pointer > bufferSize )
    pointer -= bufferSize;

  // Unwrap the window end and the probe so everything is linear
  // relative to earlierPointer.
  if ( laterPointer < earlierPointer )
    laterPointer += bufferSize;
  if ( pointer < earlierPointer )
    pointer += bufferSize;

  const bool inside = ( pointer >= earlierPointer ) && ( pointer < laterPointer );
  return inside ? 1 : 0;
}
5614
 
5615
// A structure to hold various information related to the DirectSound
5616
// API implementation.
5617
struct DsHandle {
5618
  unsigned int drainCounter; // Tracks callback counts when draining
5619
  bool internalDrain;        // Indicates if stop is initiated from callback or not.
5620
  void *id[2];
5621
  void *buffer[2];
5622
  bool xrun[2];
5623
  UINT bufferPointer[2];  
5624
  DWORD dsBufferSize[2];
5625
  DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5626
  HANDLE condition;
5627
 
5628
  DsHandle()
5629
    :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5630
};
5631
 
5632
// Declarations for utility functions, callbacks, and structures
5633
// specific to the DirectSound implementation.
5634
static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5635
                                          LPCTSTR description,
5636
                                          LPCTSTR module,
5637
                                          LPVOID lpContext );
5638
 
5639
static const char* getErrorString( int code );
5640
 
5641
static unsigned __stdcall callbackHandler( void *ptr );
5642
 
5643
struct DsDevice {
5644
  LPGUID id[2];
5645
  bool validId[2];
5646
  bool found;
5647
  std::string name;
5648
 
5649
  DsDevice()
5650
  : found(false) { validId[0] = false; validId[1] = false; }
5651
};
5652
 
5653
// Context handed to the DirectSound(Capture)Enumerate callback
// (deviceQueryCallback): which direction is being probed and where
// to record the devices that are discovered.
struct DsProbeData {
  bool isInput;
  std::vector<struct DsDevice>* dsDevices;
};
5657
 
5658
RtApiDs :: RtApiDs()
{
  // DirectSound will run both-threaded.  Try to initialize COM here; if
  // that fails, just accept whatever threading model the mainline chose.
  coInitialized_ = false;
  HRESULT hr = CoInitialize( NULL );
  if ( SUCCEEDED( hr ) )
    coInitialized_ = true; // remember so the destructor can balance it
}
5666
 
5667
RtApiDs :: ~RtApiDs()
{
  // Tear down any open stream first, then balance the CoInitialize()
  // performed in the constructor (only if it succeeded there).
  if ( stream_.state != STREAM_CLOSED )
    closeStream();

  if ( coInitialized_ )
    CoUninitialize();
}
5672
 
5673
// The DirectSound default output is always the first device.
5674
unsigned int RtApiDs :: getDefaultOutputDevice( void )
5675
{
5676
  return 0;
5677
}
5678
 
5679
// The DirectSound default input is always the first input device,
5680
// which is the first capture device enumerated.
5681
unsigned int RtApiDs :: getDefaultInputDevice( void )
5682
{
5683
  return 0;
5684
}
5685
 
5686
unsigned int RtApiDs :: getDeviceCount( void )
5687
{
5688
  // Set query flag for previously found devices to false, so that we
5689
  // can check for any devices that have disappeared.
5690
  for ( unsigned int i=0; i<dsDevices.size(); i++ )
5691
    dsDevices[i].found = false;
5692
 
5693
  // Query DirectSound devices.
5694
  struct DsProbeData probeInfo;
5695
  probeInfo.isInput = false;
5696
  probeInfo.dsDevices = &dsDevices;
5697
  HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5698
  if ( FAILED( result ) ) {
5699
    errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5700
    errorText_ = errorStream_.str();
5701
    error( RtAudioError::WARNING );
5702
  }
5703
 
5704
  // Query DirectSoundCapture devices.
5705
  probeInfo.isInput = true;
5706
  result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5707
  if ( FAILED( result ) ) {
5708
    errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5709
    errorText_ = errorStream_.str();
5710
    error( RtAudioError::WARNING );
5711
  }
5712
 
5713
  // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5714
  for ( unsigned int i=0; i<dsDevices.size(); ) {
5715
    if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5716
    else i++;
5717
  }
5718
 
5719
  return static_cast<unsigned int>(dsDevices.size());
5720
}
5721
 
5722
// Probe a single DirectSound device and fill in an RtAudio::DeviceInfo
// record.  The probe runs in two phases: first the playback (output)
// side, then — via the 'probeInput' label — the capture side.  Failures
// in the output phase are reported as warnings and fall through to the
// input probe; info.probed is only set true on a successful probe.
RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  if ( dsDevices.size() == 0 ) {
    // Force a query of all devices
    getDeviceCount();
    if ( dsDevices.size() == 0 ) {
      errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
      error( RtAudioError::INVALID_USE );
      return info;
    }
  }

  if ( device >= dsDevices.size() ) {
    errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  HRESULT result;
  // Skip the output probe entirely when this device has no playback GUID.
  if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

  LPDIRECTSOUND output;
  DSCAPS outCaps;
  result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  outCaps.dwSize = sizeof( outCaps );
  result = output->GetCaps( &outCaps );
  if ( FAILED( result ) ) {
    output->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto probeInput;
  }

  // Get output channel information.
  info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

  // Get sample rate information.  Keep every rate within the device's
  // secondary-buffer range; prefer the highest supported rate <= 48 kHz.
  info.sampleRates.clear();
  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
    if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
         SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
      info.sampleRates.push_back( SAMPLE_RATES[k] );

      if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[k];
    }
  }

  // Get format information.
  if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
  if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

  output->Release();

  if ( getDefaultOutputDevice() == device )
    info.isDefaultOutput = true;

  // No capture side on this device: the output-only probe is complete.
  if ( dsDevices[ device ].validId[1] == false ) {
    info.name = dsDevices[ device ].name;
    info.probed = true;
    return info;
  }

 probeInput:

  LPDIRECTSOUNDCAPTURE input;
  result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  DSCCAPS inCaps;
  inCaps.dwSize = sizeof( inCaps );
  result = input->GetCaps( &inCaps );
  if ( FAILED( result ) ) {
    input->Release();
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information.
  info.inputChannels = inCaps.dwChannels;

  // Get sample rate and format information.  DirectSoundCapture reports
  // support as WAVE_FORMAT_* bit flags that encode rate, channel count
  // and sample width together, so the rate list depends on both the
  // channel count and whichever sample format was detected first.
  std::vector<unsigned int> rates;
  if ( inCaps.dwChannels >= 2 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
    }
  }
  else if ( inCaps.dwChannels == 1 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
    }
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
    }
  }
  else info.inputChannels = 0; // technically, this would be an error

  input->Release();

  if ( info.inputChannels == 0 ) return info;

  // Copy the supported rates to the info structure but avoid duplication.
  bool found;
  for ( unsigned int i=0; i<rates.size(); i++ ) {
    found = false;
    for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
      if ( rates[i] == info.sampleRates[j] ) {
        found = true;
        break;
      }
    }
    if ( found == false ) info.sampleRates.push_back( rates[i] );
  }
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  if ( device == 0 ) info.isDefaultInput = true;

  // Copy name and return.
  info.name = dsDevices[ device ].name;
  info.probed = true;
  return info;
}
5900
 
5901
bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5902
                                 unsigned int firstChannel, unsigned int sampleRate,
5903
                                 RtAudioFormat format, unsigned int *bufferSize,
5904
                                 RtAudio::StreamOptions *options )
5905
{
5906
  if ( channels + firstChannel > 2 ) {
5907
    errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5908
    return FAILURE;
5909
  }
5910
 
5911
  size_t nDevices = dsDevices.size();
5912
  if ( nDevices == 0 ) {
5913
    // This should not happen because a check is made before this function is called.
5914
    errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5915
    return FAILURE;
5916
  }
5917
 
5918
  if ( device >= nDevices ) {
5919
    // This should not happen because a check is made before this function is called.
5920
    errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5921
    return FAILURE;
5922
  }
5923
 
5924
  if ( mode == OUTPUT ) {
5925
    if ( dsDevices[ device ].validId[0] == false ) {
5926
      errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5927
      errorText_ = errorStream_.str();
5928
      return FAILURE;
5929
    }
5930
  }
5931
  else { // mode == INPUT
5932
    if ( dsDevices[ device ].validId[1] == false ) {
5933
      errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5934
      errorText_ = errorStream_.str();
5935
      return FAILURE;
5936
    }
5937
  }
5938
 
5939
  // According to a note in PortAudio, using GetDesktopWindow()
5940
  // instead of GetForegroundWindow() is supposed to avoid problems
5941
  // that occur when the application's window is not the foreground
5942
  // window.  Also, if the application window closes before the
5943
  // DirectSound buffer, DirectSound can crash.  In the past, I had
5944
  // problems when using GetDesktopWindow() but it seems fine now
5945
  // (January 2010).  I'll leave it commented here.
5946
  // HWND hWnd = GetForegroundWindow();
5947
  HWND hWnd = GetDesktopWindow();
5948
 
5949
  // Check the numberOfBuffers parameter and limit the lowest value to
5950
  // two.  This is a judgement call and a value of two is probably too
5951
  // low for capture, but it should work for playback.
5952
  int nBuffers = 0;
5953
  if ( options ) nBuffers = options->numberOfBuffers;
5954
  if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5955
  if ( nBuffers < 2 ) nBuffers = 3;
5956
 
5957
  // Check the lower range of the user-specified buffer size and set
5958
  // (arbitrarily) to a lower bound of 32.
5959
  if ( *bufferSize < 32 ) *bufferSize = 32;
5960
 
5961
  // Create the wave format structure.  The data format setting will
5962
  // be determined later.
5963
  WAVEFORMATEX waveFormat;
5964
  ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5965
  waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5966
  waveFormat.nChannels = channels + firstChannel;
5967
  waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5968
 
5969
  // Determine the device buffer size. By default, we'll use the value
5970
  // defined above (32K), but we will grow it to make allowances for
5971
  // very large software buffer sizes.
5972
  DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5973
  DWORD dsPointerLeadTime = 0;
5974
 
5975
  void *ohandle = 0, *bhandle = 0;
5976
  HRESULT result;
5977
  if ( mode == OUTPUT ) {
5978
 
5979
    LPDIRECTSOUND output;
5980
    result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5981
    if ( FAILED( result ) ) {
5982
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5983
      errorText_ = errorStream_.str();
5984
      return FAILURE;
5985
    }
5986
 
5987
    DSCAPS outCaps;
5988
    outCaps.dwSize = sizeof( outCaps );
5989
    result = output->GetCaps( &outCaps );
5990
    if ( FAILED( result ) ) {
5991
      output->Release();
5992
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5993
      errorText_ = errorStream_.str();
5994
      return FAILURE;
5995
    }
5996
 
5997
    // Check channel information.
5998
    if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5999
      errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
6000
      errorText_ = errorStream_.str();
6001
      return FAILURE;
6002
    }
6003
 
6004
    // Check format information.  Use 16-bit format unless not
6005
    // supported or user requests 8-bit.
6006
    if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
6007
         !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
6008
      waveFormat.wBitsPerSample = 16;
6009
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6010
    }
6011
    else {
6012
      waveFormat.wBitsPerSample = 8;
6013
      stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6014
    }
6015
    stream_.userFormat = format;
6016
 
6017
    // Update wave format structure and buffer information.
6018
    waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6019
    waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6020
    dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6021
 
6022
    // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6023
    while ( dsPointerLeadTime * 2U > dsBufferSize )
6024
      dsBufferSize *= 2;
6025
 
6026
    // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
6027
    // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
6028
    // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
6029
    result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
6030
    if ( FAILED( result ) ) {
6031
      output->Release();
6032
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
6033
      errorText_ = errorStream_.str();
6034
      return FAILURE;
6035
    }
6036
 
6037
    // Even though we will write to the secondary buffer, we need to
6038
    // access the primary buffer to set the correct output format
6039
    // (since the default is 8-bit, 22 kHz!).  Setup the DS primary
6040
    // buffer description.
6041
    DSBUFFERDESC bufferDescription;
6042
    ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6043
    bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6044
    bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
6045
 
6046
    // Obtain the primary buffer
6047
    LPDIRECTSOUNDBUFFER buffer;
6048
    result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6049
    if ( FAILED( result ) ) {
6050
      output->Release();
6051
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
6052
      errorText_ = errorStream_.str();
6053
      return FAILURE;
6054
    }
6055
 
6056
    // Set the primary DS buffer sound format.
6057
    result = buffer->SetFormat( &waveFormat );
6058
    if ( FAILED( result ) ) {
6059
      output->Release();
6060
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
6061
      errorText_ = errorStream_.str();
6062
      return FAILURE;
6063
    }
6064
 
6065
    // Setup the secondary DS buffer description.
6066
    ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6067
    bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6068
    bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6069
                                  DSBCAPS_GLOBALFOCUS |
6070
                                  DSBCAPS_GETCURRENTPOSITION2 |
6071
                                  DSBCAPS_LOCHARDWARE );  // Force hardware mixing
6072
    bufferDescription.dwBufferBytes = dsBufferSize;
6073
    bufferDescription.lpwfxFormat = &waveFormat;
6074
 
6075
    // Try to create the secondary DS buffer.  If that doesn't work,
6076
    // try to use software mixing.  Otherwise, there's a problem.
6077
    result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6078
    if ( FAILED( result ) ) {
6079
      bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6080
                                    DSBCAPS_GLOBALFOCUS |
6081
                                    DSBCAPS_GETCURRENTPOSITION2 |
6082
                                    DSBCAPS_LOCSOFTWARE );  // Force software mixing
6083
      result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6084
      if ( FAILED( result ) ) {
6085
        output->Release();
6086
        errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6087
        errorText_ = errorStream_.str();
6088
        return FAILURE;
6089
      }
6090
    }
6091
 
6092
    // Get the buffer size ... might be different from what we specified.
6093
    DSBCAPS dsbcaps;
6094
    dsbcaps.dwSize = sizeof( DSBCAPS );
6095
    result = buffer->GetCaps( &dsbcaps );
6096
    if ( FAILED( result ) ) {
6097
      output->Release();
6098
      buffer->Release();
6099
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6100
      errorText_ = errorStream_.str();
6101
      return FAILURE;
6102
    }
6103
 
6104
    dsBufferSize = dsbcaps.dwBufferBytes;
6105
 
6106
    // Lock the DS buffer
6107
    LPVOID audioPtr;
6108
    DWORD dataLen;
6109
    result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6110
    if ( FAILED( result ) ) {
6111
      output->Release();
6112
      buffer->Release();
6113
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6114
      errorText_ = errorStream_.str();
6115
      return FAILURE;
6116
    }
6117
 
6118
    // Zero the DS buffer
6119
    ZeroMemory( audioPtr, dataLen );
6120
 
6121
    // Unlock the DS buffer
6122
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6123
    if ( FAILED( result ) ) {
6124
      output->Release();
6125
      buffer->Release();
6126
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6127
      errorText_ = errorStream_.str();
6128
      return FAILURE;
6129
    }
6130
 
6131
    ohandle = (void *) output;
6132
    bhandle = (void *) buffer;
6133
  }
6134
 
6135
  if ( mode == INPUT ) {
6136
 
6137
    LPDIRECTSOUNDCAPTURE input;
6138
    result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6139
    if ( FAILED( result ) ) {
6140
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6141
      errorText_ = errorStream_.str();
6142
      return FAILURE;
6143
    }
6144
 
6145
    DSCCAPS inCaps;
6146
    inCaps.dwSize = sizeof( inCaps );
6147
    result = input->GetCaps( &inCaps );
6148
    if ( FAILED( result ) ) {
6149
      input->Release();
6150
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6151
      errorText_ = errorStream_.str();
6152
      return FAILURE;
6153
    }
6154
 
6155
    // Check channel information.
6156
    if ( inCaps.dwChannels < channels + firstChannel ) {
6157
      errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6158
      return FAILURE;
6159
    }
6160
 
6161
    // Check format information.  Use 16-bit format unless user
6162
    // requests 8-bit.
6163
    DWORD deviceFormats;
6164
    if ( channels + firstChannel == 2 ) {
6165
      deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6166
      if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6167
        waveFormat.wBitsPerSample = 8;
6168
        stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6169
      }
6170
      else { // assume 16-bit is supported
6171
        waveFormat.wBitsPerSample = 16;
6172
        stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6173
      }
6174
    }
6175
    else { // channel == 1
6176
      deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6177
      if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6178
        waveFormat.wBitsPerSample = 8;
6179
        stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6180
      }
6181
      else { // assume 16-bit is supported
6182
        waveFormat.wBitsPerSample = 16;
6183
        stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6184
      }
6185
    }
6186
    stream_.userFormat = format;
6187
 
6188
    // Update wave format structure and buffer information.
6189
    waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6190
    waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6191
    dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6192
 
6193
    // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6194
    while ( dsPointerLeadTime * 2U > dsBufferSize )
6195
      dsBufferSize *= 2;
6196
 
6197
    // Setup the secondary DS buffer description.
6198
    DSCBUFFERDESC bufferDescription;
6199
    ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6200
    bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6201
    bufferDescription.dwFlags = 0;
6202
    bufferDescription.dwReserved = 0;
6203
    bufferDescription.dwBufferBytes = dsBufferSize;
6204
    bufferDescription.lpwfxFormat = &waveFormat;
6205
 
6206
    // Create the capture buffer.
6207
    LPDIRECTSOUNDCAPTUREBUFFER buffer;
6208
    result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6209
    if ( FAILED( result ) ) {
6210
      input->Release();
6211
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6212
      errorText_ = errorStream_.str();
6213
      return FAILURE;
6214
    }
6215
 
6216
    // Get the buffer size ... might be different from what we specified.
6217
    DSCBCAPS dscbcaps;
6218
    dscbcaps.dwSize = sizeof( DSCBCAPS );
6219
    result = buffer->GetCaps( &dscbcaps );
6220
    if ( FAILED( result ) ) {
6221
      input->Release();
6222
      buffer->Release();
6223
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6224
      errorText_ = errorStream_.str();
6225
      return FAILURE;
6226
    }
6227
 
6228
    dsBufferSize = dscbcaps.dwBufferBytes;
6229
 
6230
    // NOTE: We could have a problem here if this is a duplex stream
6231
    // and the play and capture hardware buffer sizes are different
6232
    // (I'm actually not sure if that is a problem or not).
6233
    // Currently, we are not verifying that.
6234
 
6235
    // Lock the capture buffer
6236
    LPVOID audioPtr;
6237
    DWORD dataLen;
6238
    result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6239
    if ( FAILED( result ) ) {
6240
      input->Release();
6241
      buffer->Release();
6242
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6243
      errorText_ = errorStream_.str();
6244
      return FAILURE;
6245
    }
6246
 
6247
    // Zero the buffer
6248
    ZeroMemory( audioPtr, dataLen );
6249
 
6250
    // Unlock the buffer
6251
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6252
    if ( FAILED( result ) ) {
6253
      input->Release();
6254
      buffer->Release();
6255
      errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6256
      errorText_ = errorStream_.str();
6257
      return FAILURE;
6258
    }
6259
 
6260
    ohandle = (void *) input;
6261
    bhandle = (void *) buffer;
6262
  }
6263
 
6264
  // Set various stream parameters
6265
  DsHandle *handle = 0;
6266
  stream_.nDeviceChannels[mode] = channels + firstChannel;
6267
  stream_.nUserChannels[mode] = channels;
6268
  stream_.bufferSize = *bufferSize;
6269
  stream_.channelOffset[mode] = firstChannel;
6270
  stream_.deviceInterleaved[mode] = true;
6271
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6272
  else stream_.userInterleaved = true;
6273
 
6274
  // Set flag for buffer conversion
6275
  stream_.doConvertBuffer[mode] = false;
6276
  if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6277
    stream_.doConvertBuffer[mode] = true;
6278
  if (stream_.userFormat != stream_.deviceFormat[mode])
6279
    stream_.doConvertBuffer[mode] = true;
6280
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6281
       stream_.nUserChannels[mode] > 1 )
6282
    stream_.doConvertBuffer[mode] = true;
6283
 
6284
  // Allocate necessary internal buffers
6285
  long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6286
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6287
  if ( stream_.userBuffer[mode] == NULL ) {
6288
    errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6289
    goto error;
6290
  }
6291
 
6292
  if ( stream_.doConvertBuffer[mode] ) {
6293
 
6294
    bool makeBuffer = true;
6295
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6296
    if ( mode == INPUT ) {
6297
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6298
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6299
        if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6300
      }
6301
    }
6302
 
6303
    if ( makeBuffer ) {
6304
      bufferBytes *= *bufferSize;
6305
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6306
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6307
      if ( stream_.deviceBuffer == NULL ) {
6308
        errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6309
        goto error;
6310
      }
6311
    }
6312
  }
6313
 
6314
  // Allocate our DsHandle structures for the stream.
6315
  if ( stream_.apiHandle == 0 ) {
6316
    try {
6317
      handle = new DsHandle;
6318
    }
6319
    catch ( std::bad_alloc& ) {
6320
      errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6321
      goto error;
6322
    }
6323
 
6324
    // Create a manual-reset event.
6325
    handle->condition = CreateEvent( NULL,   // no security
6326
                                     TRUE,   // manual-reset
6327
                                     FALSE,  // non-signaled initially
6328
                                     NULL ); // unnamed
6329
    stream_.apiHandle = (void *) handle;
6330
  }
6331
  else
6332
    handle = (DsHandle *) stream_.apiHandle;
6333
  handle->id[mode] = ohandle;
6334
  handle->buffer[mode] = bhandle;
6335
  handle->dsBufferSize[mode] = dsBufferSize;
6336
  handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6337
 
6338
  stream_.device[mode] = device;
6339
  stream_.state = STREAM_STOPPED;
6340
  if ( stream_.mode == OUTPUT && mode == INPUT )
6341
    // We had already set up an output stream.
6342
    stream_.mode = DUPLEX;
6343
  else
6344
    stream_.mode = mode;
6345
  stream_.nBuffers = nBuffers;
6346
  stream_.sampleRate = sampleRate;
6347
 
6348
  // Setup the buffer conversion information structure.
6349
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6350
 
6351
  // Setup the callback thread.
6352
  if ( stream_.callbackInfo.isRunning == false ) {
6353
    unsigned threadId;
6354
    stream_.callbackInfo.isRunning = true;
6355
    stream_.callbackInfo.object = (void *) this;
6356
    stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6357
                                                  &stream_.callbackInfo, 0, &threadId );
6358
    if ( stream_.callbackInfo.thread == 0 ) {
6359
      errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6360
      goto error;
6361
    }
6362
 
6363
    // Boost DS thread priority
6364
    SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6365
  }
6366
  return SUCCESS;
6367
 
6368
 error:
6369
  if ( handle ) {
6370
    if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6371
      LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6372
      LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6373
      if ( buffer ) buffer->Release();
6374
      object->Release();
6375
    }
6376
    if ( handle->buffer[1] ) {
6377
      LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6378
      LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6379
      if ( buffer ) buffer->Release();
6380
      object->Release();
6381
    }
6382
    CloseHandle( handle->condition );
6383
    delete handle;
6384
    stream_.apiHandle = 0;
6385
  }
6386
 
6387
  for ( int i=0; i<2; i++ ) {
6388
    if ( stream_.userBuffer[i] ) {
6389
      free( stream_.userBuffer[i] );
6390
      stream_.userBuffer[i] = 0;
6391
    }
6392
  }
6393
 
6394
  if ( stream_.deviceBuffer ) {
6395
    free( stream_.deviceBuffer );
6396
    stream_.deviceBuffer = 0;
6397
  }
6398
 
6399
  stream_.state = STREAM_CLOSED;
6400
  return FAILURE;
6401
}
6402
 
6403
void RtApiDs :: closeStream()
6404
{
6405
  if ( stream_.state == STREAM_CLOSED ) {
6406
    errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6407
    error( RtAudioError::WARNING );
6408
    return;
6409
  }
6410
 
6411
  // Stop the callback thread.
6412
  stream_.callbackInfo.isRunning = false;
6413
  WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6414
  CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6415
 
6416
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
6417
  if ( handle ) {
6418
    if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6419
      LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6420
      LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6421
      if ( buffer ) {
6422
        buffer->Stop();
6423
        buffer->Release();
6424
      }
6425
      object->Release();
6426
    }
6427
    if ( handle->buffer[1] ) {
6428
      LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6429
      LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6430
      if ( buffer ) {
6431
        buffer->Stop();
6432
        buffer->Release();
6433
      }
6434
      object->Release();
6435
    }
6436
    CloseHandle( handle->condition );
6437
    delete handle;
6438
    stream_.apiHandle = 0;
6439
  }
6440
 
6441
  for ( int i=0; i<2; i++ ) {
6442
    if ( stream_.userBuffer[i] ) {
6443
      free( stream_.userBuffer[i] );
6444
      stream_.userBuffer[i] = 0;
6445
    }
6446
  }
6447
 
6448
  if ( stream_.deviceBuffer ) {
6449
    free( stream_.deviceBuffer );
6450
    stream_.deviceBuffer = 0;
6451
  }
6452
 
6453
  stream_.mode = UNINITIALIZED;
6454
  stream_.state = STREAM_CLOSED;
6455
}
6456
 
6457
void RtApiDs :: startStream()
6458
{
6459
  verifyStream();
6460
  if ( stream_.state == STREAM_RUNNING ) {
6461
    errorText_ = "RtApiDs::startStream(): the stream is already running!";
6462
    error( RtAudioError::WARNING );
6463
    return;
6464
  }
6465
 
6466
  #if defined( HAVE_GETTIMEOFDAY )
6467
  gettimeofday( &stream_.lastTickTimestamp, NULL );
6468
  #endif
6469
 
6470
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
6471
 
6472
  // Increase scheduler frequency on lesser windows (a side-effect of
6473
  // increasing timer accuracy).  On greater windows (Win2K or later),
6474
  // this is already in effect.
6475
  timeBeginPeriod( 1 );
6476
 
6477
  buffersRolling = false;
6478
  duplexPrerollBytes = 0;
6479
 
6480
  if ( stream_.mode == DUPLEX ) {
6481
    // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6482
    duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6483
  }
6484
 
6485
  HRESULT result = 0;
6486
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6487
 
6488
    LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6489
    result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6490
    if ( FAILED( result ) ) {
6491
      errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6492
      errorText_ = errorStream_.str();
6493
      goto unlock;
6494
    }
6495
  }
6496
 
6497
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6498
 
6499
    LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6500
    result = buffer->Start( DSCBSTART_LOOPING );
6501
    if ( FAILED( result ) ) {
6502
      errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6503
      errorText_ = errorStream_.str();
6504
      goto unlock;
6505
    }
6506
  }
6507
 
6508
  handle->drainCounter = 0;
6509
  handle->internalDrain = false;
6510
  ResetEvent( handle->condition );
6511
  stream_.state = STREAM_RUNNING;
6512
 
6513
 unlock:
6514
  if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6515
}
6516
 
6517
void RtApiDs :: stopStream()
6518
{
6519
  verifyStream();
6520
  if ( stream_.state == STREAM_STOPPED ) {
6521
    errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6522
    error( RtAudioError::WARNING );
6523
    return;
6524
  }
6525
 
6526
  HRESULT result = 0;
6527
  LPVOID audioPtr;
6528
  DWORD dataLen;
6529
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
6530
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6531
    if ( handle->drainCounter == 0 ) {
6532
      handle->drainCounter = 2;
6533
      WaitForSingleObject( handle->condition, INFINITE );  // block until signaled
6534
    }
6535
 
6536
    stream_.state = STREAM_STOPPED;
6537
 
6538
    MUTEX_LOCK( &stream_.mutex );
6539
 
6540
    // Stop the buffer and clear memory
6541
    LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6542
    result = buffer->Stop();
6543
    if ( FAILED( result ) ) {
6544
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6545
      errorText_ = errorStream_.str();
6546
      goto unlock;
6547
    }
6548
 
6549
    // Lock the buffer and clear it so that if we start to play again,
6550
    // we won't have old data playing.
6551
    result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6552
    if ( FAILED( result ) ) {
6553
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6554
      errorText_ = errorStream_.str();
6555
      goto unlock;
6556
    }
6557
 
6558
    // Zero the DS buffer
6559
    ZeroMemory( audioPtr, dataLen );
6560
 
6561
    // Unlock the DS buffer
6562
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6563
    if ( FAILED( result ) ) {
6564
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6565
      errorText_ = errorStream_.str();
6566
      goto unlock;
6567
    }
6568
 
6569
    // If we start playing again, we must begin at beginning of buffer.
6570
    handle->bufferPointer[0] = 0;
6571
  }
6572
 
6573
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6574
    LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6575
    audioPtr = NULL;
6576
    dataLen = 0;
6577
 
6578
    stream_.state = STREAM_STOPPED;
6579
 
6580
    if ( stream_.mode != DUPLEX )
6581
      MUTEX_LOCK( &stream_.mutex );
6582
 
6583
    result = buffer->Stop();
6584
    if ( FAILED( result ) ) {
6585
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6586
      errorText_ = errorStream_.str();
6587
      goto unlock;
6588
    }
6589
 
6590
    // Lock the buffer and clear it so that if we start to play again,
6591
    // we won't have old data playing.
6592
    result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6593
    if ( FAILED( result ) ) {
6594
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6595
      errorText_ = errorStream_.str();
6596
      goto unlock;
6597
    }
6598
 
6599
    // Zero the DS buffer
6600
    ZeroMemory( audioPtr, dataLen );
6601
 
6602
    // Unlock the DS buffer
6603
    result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6604
    if ( FAILED( result ) ) {
6605
      errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6606
      errorText_ = errorStream_.str();
6607
      goto unlock;
6608
    }
6609
 
6610
    // If we start recording again, we must begin at beginning of buffer.
6611
    handle->bufferPointer[1] = 0;
6612
  }
6613
 
6614
 unlock:
6615
  timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6616
  MUTEX_UNLOCK( &stream_.mutex );
6617
 
6618
  if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6619
}
6620
 
6621
void RtApiDs :: abortStream()
6622
{
6623
  verifyStream();
6624
  if ( stream_.state == STREAM_STOPPED ) {
6625
    errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6626
    error( RtAudioError::WARNING );
6627
    return;
6628
  }
6629
 
6630
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
6631
  handle->drainCounter = 2;
6632
 
6633
  stopStream();
6634
}
6635
 
6636
void RtApiDs :: callbackEvent()
6637
{
6638
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6639
    Sleep( 50 ); // sleep 50 milliseconds
6640
    return;
6641
  }
6642
 
6643
  if ( stream_.state == STREAM_CLOSED ) {
6644
    errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6645
    error( RtAudioError::WARNING );
6646
    return;
6647
  }
6648
 
6649
  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6650
  DsHandle *handle = (DsHandle *) stream_.apiHandle;
6651
 
6652
  // Check if we were draining the stream and signal is finished.
6653
  if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6654
 
6655
    stream_.state = STREAM_STOPPING;
6656
    if ( handle->internalDrain == false )
6657
      SetEvent( handle->condition );
6658
    else
6659
      stopStream();
6660
    return;
6661
  }
6662
 
6663
  // Invoke user callback to get fresh output data UNLESS we are
6664
  // draining stream.
6665
  if ( handle->drainCounter == 0 ) {
6666
    RtAudioCallback callback = (RtAudioCallback) info->callback;
6667
    double streamTime = getStreamTime();
6668
    RtAudioStreamStatus status = 0;
6669
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6670
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
6671
      handle->xrun[0] = false;
6672
    }
6673
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6674
      status |= RTAUDIO_INPUT_OVERFLOW;
6675
      handle->xrun[1] = false;
6676
    }
6677
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6678
                                  stream_.bufferSize, streamTime, status, info->userData );
6679
    if ( cbReturnValue == 2 ) {
6680
      stream_.state = STREAM_STOPPING;
6681
      handle->drainCounter = 2;
6682
      abortStream();
6683
      return;
6684
    }
6685
    else if ( cbReturnValue == 1 ) {
6686
      handle->drainCounter = 1;
6687
      handle->internalDrain = true;
6688
    }
6689
  }
6690
 
6691
  HRESULT result;
6692
  DWORD currentWritePointer, safeWritePointer;
6693
  DWORD currentReadPointer, safeReadPointer;
6694
  UINT nextWritePointer;
6695
 
6696
  LPVOID buffer1 = NULL;
6697
  LPVOID buffer2 = NULL;
6698
  DWORD bufferSize1 = 0;
6699
  DWORD bufferSize2 = 0;
6700
 
6701
  char *buffer;
6702
  long bufferBytes;
6703
 
6704
  MUTEX_LOCK( &stream_.mutex );
6705
  if ( stream_.state == STREAM_STOPPED ) {
6706
    MUTEX_UNLOCK( &stream_.mutex );
6707
    return;
6708
  }
6709
 
6710
  if ( buffersRolling == false ) {
6711
    if ( stream_.mode == DUPLEX ) {
6712
      //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6713
 
6714
      // It takes a while for the devices to get rolling. As a result,
6715
      // there's no guarantee that the capture and write device pointers
6716
      // will move in lockstep.  Wait here for both devices to start
6717
      // rolling, and then set our buffer pointers accordingly.
6718
      // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6719
      // bytes later than the write buffer.
6720
 
6721
      // Stub: a serious risk of having a pre-emptive scheduling round
6722
      // take place between the two GetCurrentPosition calls... but I'm
6723
      // really not sure how to solve the problem.  Temporarily boost to
6724
      // Realtime priority, maybe; but I'm not sure what priority the
6725
      // DirectSound service threads run at. We *should* be roughly
6726
      // within a ms or so of correct.
6727
 
6728
      LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6729
      LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6730
 
6731
      DWORD startSafeWritePointer, startSafeReadPointer;
6732
 
6733
      result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6734
      if ( FAILED( result ) ) {
6735
        errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6736
        errorText_ = errorStream_.str();
6737
        MUTEX_UNLOCK( &stream_.mutex );
6738
        error( RtAudioError::SYSTEM_ERROR );
6739
        return;
6740
      }
6741
      result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6742
      if ( FAILED( result ) ) {
6743
        errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6744
        errorText_ = errorStream_.str();
6745
        MUTEX_UNLOCK( &stream_.mutex );
6746
        error( RtAudioError::SYSTEM_ERROR );
6747
        return;
6748
      }
6749
      while ( true ) {
6750
        result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6751
        if ( FAILED( result ) ) {
6752
          errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6753
          errorText_ = errorStream_.str();
6754
          MUTEX_UNLOCK( &stream_.mutex );
6755
          error( RtAudioError::SYSTEM_ERROR );
6756
          return;
6757
        }
6758
        result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6759
        if ( FAILED( result ) ) {
6760
          errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6761
          errorText_ = errorStream_.str();
6762
          MUTEX_UNLOCK( &stream_.mutex );
6763
          error( RtAudioError::SYSTEM_ERROR );
6764
          return;
6765
        }
6766
        if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6767
        Sleep( 1 );
6768
      }
6769
 
6770
      //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6771
 
6772
      handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6773
      if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6774
      handle->bufferPointer[1] = safeReadPointer;
6775
    }
6776
    else if ( stream_.mode == OUTPUT ) {
6777
 
6778
      // Set the proper nextWritePosition after initial startup.
6779
      LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6780
      result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6781
      if ( FAILED( result ) ) {
6782
        errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6783
        errorText_ = errorStream_.str();
6784
        MUTEX_UNLOCK( &stream_.mutex );
6785
        error( RtAudioError::SYSTEM_ERROR );
6786
        return;
6787
      }
6788
      handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6789
      if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6790
    }
6791
 
6792
    buffersRolling = true;
6793
  }
6794
 
6795
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6796
 
6797
    LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6798
 
6799
    if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6800
      bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6801
      bufferBytes *= formatBytes( stream_.userFormat );
6802
      memset( stream_.userBuffer[0], 0, bufferBytes );
6803
    }
6804
 
6805
    // Setup parameters and do buffer conversion if necessary.
6806
    if ( stream_.doConvertBuffer[0] ) {
6807
      buffer = stream_.deviceBuffer;
6808
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6809
      bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6810
      bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6811
    }
6812
    else {
6813
      buffer = stream_.userBuffer[0];
6814
      bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6815
      bufferBytes *= formatBytes( stream_.userFormat );
6816
    }
6817
 
6818
    // No byte swapping necessary in DirectSound implementation.
6819
 
6820
    // Ahhh ... windoze.  16-bit data is signed but 8-bit data is
6821
    // unsigned.  So, we need to convert our signed 8-bit data here to
6822
    // unsigned.
6823
    if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6824
      for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6825
 
6826
    DWORD dsBufferSize = handle->dsBufferSize[0];
6827
    nextWritePointer = handle->bufferPointer[0];
6828
 
6829
    DWORD endWrite, leadPointer;
6830
    while ( true ) {
6831
      // Find out where the read and "safe write" pointers are.
6832
      result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6833
      if ( FAILED( result ) ) {
6834
        errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6835
        errorText_ = errorStream_.str();
6836
        MUTEX_UNLOCK( &stream_.mutex );
6837
        error( RtAudioError::SYSTEM_ERROR );
6838
        return;
6839
      }
6840
 
6841
      // We will copy our output buffer into the region between
6842
      // safeWritePointer and leadPointer.  If leadPointer is not
6843
      // beyond the next endWrite position, wait until it is.
6844
      leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6845
      //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6846
      if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6847
      if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6848
      endWrite = nextWritePointer + bufferBytes;
6849
 
6850
      // Check whether the entire write region is behind the play pointer.
6851
      if ( leadPointer >= endWrite ) break;
6852
 
6853
      // If we are here, then we must wait until the leadPointer advances
6854
      // beyond the end of our next write region. We use the
6855
      // Sleep() function to suspend operation until that happens.
6856
      double millis = ( endWrite - leadPointer ) * 1000.0;
6857
      millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6858
      if ( millis < 1.0 ) millis = 1.0;
6859
      Sleep( (DWORD) millis );
6860
    }
6861
 
6862
    if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6863
         || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6864
      // We've strayed into the forbidden zone ... resync the read pointer.
6865
      handle->xrun[0] = true;
6866
      nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6867
      if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6868
      handle->bufferPointer[0] = nextWritePointer;
6869
      endWrite = nextWritePointer + bufferBytes;
6870
    }
6871
 
6872
    // Lock free space in the buffer
6873
    result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6874
                             &bufferSize1, &buffer2, &bufferSize2, 0 );
6875
    if ( FAILED( result ) ) {
6876
      errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6877
      errorText_ = errorStream_.str();
6878
      MUTEX_UNLOCK( &stream_.mutex );
6879
      error( RtAudioError::SYSTEM_ERROR );
6880
      return;
6881
    }
6882
 
6883
    // Copy our buffer into the DS buffer
6884
    CopyMemory( buffer1, buffer, bufferSize1 );
6885
    if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6886
 
6887
    // Update our buffer offset and unlock sound buffer
6888
    dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6889
    if ( FAILED( result ) ) {
6890
      errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6891
      errorText_ = errorStream_.str();
6892
      MUTEX_UNLOCK( &stream_.mutex );
6893
      error( RtAudioError::SYSTEM_ERROR );
6894
      return;
6895
    }
6896
    nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6897
    handle->bufferPointer[0] = nextWritePointer;
6898
  }
6899
 
6900
  // Don't bother draining input
6901
  if ( handle->drainCounter ) {
6902
    handle->drainCounter++;
6903
    goto unlock;
6904
  }
6905
 
6906
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6907
 
6908
    // Setup parameters.
6909
    if ( stream_.doConvertBuffer[1] ) {
6910
      buffer = stream_.deviceBuffer;
6911
      bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6912
      bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6913
    }
6914
    else {
6915
      buffer = stream_.userBuffer[1];
6916
      bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6917
      bufferBytes *= formatBytes( stream_.userFormat );
6918
    }
6919
 
6920
    LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6921
    long nextReadPointer = handle->bufferPointer[1];
6922
    DWORD dsBufferSize = handle->dsBufferSize[1];
6923
 
6924
    // Find out where the write and "safe read" pointers are.
6925
    result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6926
    if ( FAILED( result ) ) {
6927
      errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6928
      errorText_ = errorStream_.str();
6929
      MUTEX_UNLOCK( &stream_.mutex );
6930
      error( RtAudioError::SYSTEM_ERROR );
6931
      return;
6932
    }
6933
 
6934
    if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6935
    DWORD endRead = nextReadPointer + bufferBytes;
6936
 
6937
    // Handling depends on whether we are INPUT or DUPLEX. 
6938
    // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6939
    // then a wait here will drag the write pointers into the forbidden zone.
6940
    // 
6941
    // In DUPLEX mode, rather than wait, we will back off the read pointer until 
6942
    // it's in a safe position. This causes dropouts, but it seems to be the only 
6943
    // practical way to sync up the read and write pointers reliably, given the 
6944
    // the very complex relationship between phase and increment of the read and write 
6945
    // pointers.
6946
    //
6947
    // In order to minimize audible dropouts in DUPLEX mode, we will
6948
    // provide a pre-roll period of 0.5 seconds in which we return
6949
    // zeros from the read buffer while the pointers sync up.
6950
 
6951
    if ( stream_.mode == DUPLEX ) {
6952
      if ( safeReadPointer < endRead ) {
6953
        if ( duplexPrerollBytes <= 0 ) {
6954
          // Pre-roll time over. Be more aggressive.
6955
          int adjustment = endRead-safeReadPointer;
6956
 
6957
          handle->xrun[1] = true;
6958
          // Two cases:
6959
          //   - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6960
          //     and perform fine adjustments later.
6961
          //   - small adjustments: back off by twice as much.
6962
          if ( adjustment >= 2*bufferBytes )
6963
            nextReadPointer = safeReadPointer-2*bufferBytes;
6964
          else
6965
            nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6966
 
6967
          if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6968
 
6969
        }
6970
        else {
6971
          // In pre=roll time. Just do it.
6972
          nextReadPointer = safeReadPointer - bufferBytes;
6973
          while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6974
        }
6975
        endRead = nextReadPointer + bufferBytes;
6976
      }
6977
    }
6978
    else { // mode == INPUT
6979
      while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6980
        // See comments for playback.
6981
        double millis = (endRead - safeReadPointer) * 1000.0;
6982
        millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6983
        if ( millis < 1.0 ) millis = 1.0;
6984
        Sleep( (DWORD) millis );
6985
 
6986
        // Wake up and find out where we are now.
6987
        result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6988
        if ( FAILED( result ) ) {
6989
          errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6990
          errorText_ = errorStream_.str();
6991
          MUTEX_UNLOCK( &stream_.mutex );
6992
          error( RtAudioError::SYSTEM_ERROR );
6993
          return;
6994
        }
6995
 
6996
        if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6997
      }
6998
    }
6999
 
7000
    // Lock free space in the buffer
7001
    result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
7002
                             &bufferSize1, &buffer2, &bufferSize2, 0 );
7003
    if ( FAILED( result ) ) {
7004
      errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
7005
      errorText_ = errorStream_.str();
7006
      MUTEX_UNLOCK( &stream_.mutex );
7007
      error( RtAudioError::SYSTEM_ERROR );
7008
      return;
7009
    }
7010
 
7011
    if ( duplexPrerollBytes <= 0 ) {
7012
      // Copy our buffer into the DS buffer
7013
      CopyMemory( buffer, buffer1, bufferSize1 );
7014
      if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
7015
    }
7016
    else {
7017
      memset( buffer, 0, bufferSize1 );
7018
      if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
7019
      duplexPrerollBytes -= bufferSize1 + bufferSize2;
7020
    }
7021
 
7022
    // Update our buffer offset and unlock sound buffer
7023
    nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
7024
    dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
7025
    if ( FAILED( result ) ) {
7026
      errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
7027
      errorText_ = errorStream_.str();
7028
      MUTEX_UNLOCK( &stream_.mutex );
7029
      error( RtAudioError::SYSTEM_ERROR );
7030
      return;
7031
    }
7032
    handle->bufferPointer[1] = nextReadPointer;
7033
 
7034
    // No byte swapping necessary in DirectSound implementation.
7035
 
7036
    // If necessary, convert 8-bit data from unsigned to signed.
7037
    if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
7038
      for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
7039
 
7040
    // Do buffer conversion if necessary.
7041
    if ( stream_.doConvertBuffer[1] )
7042
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
7043
  }
7044
 
7045
 unlock:
7046
  MUTEX_UNLOCK( &stream_.mutex );
7047
  RtApi::tickStreamTime();
7048
}
7049
 
7050
// Definitions for utility functions and callbacks
7051
// specific to the DirectSound implementation.
7052
 
7053
// Thread entry point for the DirectSound callback loop.  The thread
// repeatedly services the audio stream until the stream's isRunning
// flag is cleared (by stopStream()/closeStream()), then exits.
static unsigned __stdcall callbackHandler( void *ptr )
{
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiDs *object = (RtApiDs *) info->object;

  // Tick the audio callback until we are told to stop.
  for ( ;; ) {
    if ( info->isRunning == false ) break;
    object->callbackEvent();
  }

  _endthreadex( 0 );
  return 0;
}
7066
 
7067
// Enumeration callback passed to DirectSound(Capture)EnumerateA.  Called
// once per system device; validates the device by opening it and querying
// its capabilities, then records it (name + GUID) in the shared DsDevice
// list carried through lpContext.  Always returns TRUE so enumeration
// continues to the next device.
static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
                                          LPCTSTR description,
                                          LPCTSTR /*module*/,
                                          LPVOID lpContext )
{
  // lpContext carries both the device list and the direction being probed.
  struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
  std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;

  HRESULT hr;
  bool validDevice = false;
  if ( probeInfo.isInput == true ) {
    // Capture probe: the device is valid if it opens and reports at
    // least one channel and one supported format.
    DSCCAPS caps;
    LPDIRECTSOUNDCAPTURE object;

    hr = DirectSoundCaptureCreate(  lpguid, &object,   NULL );
    if ( hr != DS_OK ) return TRUE;

    caps.dwSize = sizeof(caps);
    hr = object->GetCaps( &caps );
    if ( hr == DS_OK ) {
      if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
        validDevice = true;
    }
    object->Release();
  }
  else {
    // Playback probe: the device is valid if its primary buffer supports
    // mono or stereo output.
    DSCAPS caps;
    LPDIRECTSOUND object;
    hr = DirectSoundCreate(  lpguid, &object,   NULL );
    if ( hr != DS_OK ) return TRUE;

    caps.dwSize = sizeof(caps);
    hr = object->GetCaps( &caps );
    if ( hr == DS_OK ) {
      if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
        validDevice = true;
    }
    object->Release();
  }

  // If good device, then save its name and guid.
  std::string name = convertCharPointerToStdString( description );

  if ( validDevice ) {
    // A device already seen (e.g. during a previous enumeration or for the
    // other direction) is updated in place rather than added again.
    for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
      if ( dsDevices[i].name == name ) {
        // NOTE(review): these compare GUID *pointers* (LPGUID), not GUID
        // values — this only matches if DirectSound hands back the same
        // pointer across enumerations; confirm intended.
        if ( probeInfo.isInput && dsDevices[i].id[1] == lpguid)
        {
          dsDevices[i].found = true;
          dsDevices[i].validId[1] = true;  // index 1 = capture id
        }
        else if (dsDevices[i].id[0] == lpguid)
        {
          dsDevices[i].found = true;
          dsDevices[i].validId[0] = true;  // index 0 = playback id
        }
        return TRUE;
      }
    }

    // First sighting of this device name: append a new entry, storing the
    // GUID in slot 1 for capture devices and slot 0 for playback devices.
    DsDevice device;
    device.name = name;
    device.found = true;
    if ( probeInfo.isInput ) {
      device.id[1] = lpguid;
      device.validId[1] = true;
    }
    else {
      device.id[0] = lpguid;
      device.validId[0] = true;
    }
    dsDevices.push_back( device );
  }

  return TRUE;
}
7143
 
7144
static const char* getErrorString( int code )
7145
{
7146
  switch ( code ) {
7147
 
7148
  case DSERR_ALLOCATED:
7149
    return "Already allocated";
7150
 
7151
  case DSERR_CONTROLUNAVAIL:
7152
    return "Control unavailable";
7153
 
7154
  case DSERR_INVALIDPARAM:
7155
    return "Invalid parameter";
7156
 
7157
  case DSERR_INVALIDCALL:
7158
    return "Invalid call";
7159
 
7160
  case DSERR_GENERIC:
7161
    return "Generic error";
7162
 
7163
  case DSERR_PRIOLEVELNEEDED:
7164
    return "Priority level needed";
7165
 
7166
  case DSERR_OUTOFMEMORY:
7167
    return "Out of memory";
7168
 
7169
  case DSERR_BADFORMAT:
7170
    return "The sample rate or the channel format is not supported";
7171
 
7172
  case DSERR_UNSUPPORTED:
7173
    return "Not supported";
7174
 
7175
  case DSERR_NODRIVER:
7176
    return "No driver";
7177
 
7178
  case DSERR_ALREADYINITIALIZED:
7179
    return "Already initialized";
7180
 
7181
  case DSERR_NOAGGREGATION:
7182
    return "No aggregation";
7183
 
7184
  case DSERR_BUFFERLOST:
7185
    return "Buffer lost";
7186
 
7187
  case DSERR_OTHERAPPHASPRIO:
7188
    return "Another application already has priority";
7189
 
7190
  case DSERR_UNINITIALIZED:
7191
    return "Uninitialized";
7192
 
7193
  default:
7194
    return "DirectSound unknown error";
7195
  }
7196
}
7197
//******************** End of __WINDOWS_DS__ *********************//
7198
#endif
7199
 
7200
 
7201
#if defined(__LINUX_ALSA__)
7202
 
7203
#include <alsa/asoundlib.h>
7204
#include <unistd.h>
7205
 
7206
  // A structure to hold various information related to the ALSA API
7207
  // implementation.
7208
struct AlsaHandle {
7209
  snd_pcm_t *handles[2];
7210
  bool synchronized;
7211
  bool xrun[2];
7212
  pthread_cond_t runnable_cv;
7213
  bool runnable;
7214
 
7215
  AlsaHandle()
7216
#if _cplusplus >= 201103L
7217
    :handles{nullptr, nullptr}, synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7218
#else 
7219
    : synchronized(false), runnable(false) { handles[0] = NULL; handles[1] = NULL; xrun[0] = false; xrun[1] = false; }
7220
#endif
7221
};
7222
 
7223
static void *alsaCallbackHandler( void * ptr );
7224
 
7225
// Default constructor: all stream state is initialized by the RtApi base
// class, so no ALSA-specific setup is needed here.
RtApiAlsa :: RtApiAlsa()
{
  // Nothing to do here.
}
7229
 
7230
// Destructor: ensure any still-open stream is closed so its ALSA handles
// and buffers are released before the API object is destroyed.
RtApiAlsa :: ~RtApiAlsa()
{
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
7234
 
7235
unsigned int RtApiAlsa :: getDeviceCount( void )
7236
{
7237
  unsigned nDevices = 0;
7238
  int result, subdevice, card;
7239
  char name[64];
7240
  snd_ctl_t *handle = 0;
7241
 
7242
  strcpy(name, "default");
7243
  result = snd_ctl_open( &handle, "default", 0 );
7244
  if (result == 0) {
7245
    nDevices++;
7246
    snd_ctl_close( handle );
7247
  }
7248
 
7249
  // Count cards and devices
7250
  card = -1;
7251
  snd_card_next( &card );
7252
  while ( card >= 0 ) {
7253
    sprintf( name, "hw:%d", card );
7254
    result = snd_ctl_open( &handle, name, 0 );
7255
    if ( result < 0 ) {
7256
      handle = 0;
7257
      errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7258
      errorText_ = errorStream_.str();
7259
      error( RtAudioError::WARNING );
7260
      goto nextcard;
7261
    }
7262
    subdevice = -1;
7263
    while( 1 ) {
7264
      result = snd_ctl_pcm_next_device( handle, &subdevice );
7265
      if ( result < 0 ) {
7266
        errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7267
        errorText_ = errorStream_.str();
7268
        error( RtAudioError::WARNING );
7269
        break;
7270
      }
7271
      if ( subdevice < 0 )
7272
        break;
7273
      nDevices++;
7274
    }
7275
  nextcard:
7276
    if ( handle )
7277
        snd_ctl_close( handle );
7278
    snd_card_next( &card );
7279
  }
7280
 
7281
  return nDevices;
7282
}
7283
 
7284
RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7285
{
7286
  RtAudio::DeviceInfo info;
7287
  info.probed = false;
7288
 
7289
  unsigned nDevices = 0;
7290
  int result=-1, subdevice=-1, card=-1;
7291
  char name[64];
7292
  snd_ctl_t *chandle = 0;
7293
 
7294
  result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7295
  if ( result == 0 ) {
7296
    if ( nDevices++ == device ) {
7297
      strcpy( name, "default" );
7298
      goto foundDevice;
7299
    }
7300
  }
7301
  if ( chandle )
7302
    snd_ctl_close( chandle );
7303
 
7304
  // Count cards and devices
7305
  snd_card_next( &card );
7306
  while ( card >= 0 ) {
7307
    sprintf( name, "hw:%d", card );
7308
    result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7309
    if ( result < 0 ) {
7310
      chandle = 0;
7311
      errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7312
      errorText_ = errorStream_.str();
7313
      error( RtAudioError::WARNING );
7314
      goto nextcard;
7315
    }
7316
    subdevice = -1;
7317
    while( 1 ) {
7318
      result = snd_ctl_pcm_next_device( chandle, &subdevice );
7319
      if ( result < 0 ) {
7320
        errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7321
        errorText_ = errorStream_.str();
7322
        error( RtAudioError::WARNING );
7323
        break;
7324
      }
7325
      if ( subdevice < 0 ) break;
7326
      if ( nDevices == device ) {
7327
        sprintf( name, "hw:%d,%d", card, subdevice );
7328
        goto foundDevice;
7329
      }
7330
      nDevices++;
7331
    }
7332
  nextcard:
7333
    if ( chandle )
7334
        snd_ctl_close( chandle );
7335
    snd_card_next( &card );
7336
  }
7337
 
7338
  if ( nDevices == 0 ) {
7339
    errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7340
    error( RtAudioError::INVALID_USE );
7341
    return info;
7342
  }
7343
 
7344
  if ( device >= nDevices ) {
7345
    errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7346
    error( RtAudioError::INVALID_USE );
7347
    return info;
7348
  }
7349
 
7350
 foundDevice:
7351
 
7352
  // If a stream is already open, we cannot probe the stream devices.
7353
  // Thus, use the saved results.
7354
  if ( stream_.state != STREAM_CLOSED &&
7355
       ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7356
    snd_ctl_close( chandle );
7357
    if ( device >= devices_.size() ) {
7358
      errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7359
      error( RtAudioError::WARNING );
7360
      return info;
7361
    }
7362
    return devices_[ device ];
7363
  }
7364
 
7365
  int openMode = SND_PCM_ASYNC;
7366
  snd_pcm_stream_t stream;
7367
  snd_pcm_info_t *pcminfo;
7368
  snd_pcm_info_alloca( &pcminfo );
7369
  snd_pcm_t *phandle;
7370
  snd_pcm_hw_params_t *params;
7371
  snd_pcm_hw_params_alloca( &params );
7372
 
7373
  // First try for playback unless default device (which has subdev -1)
7374
  stream = SND_PCM_STREAM_PLAYBACK;
7375
  snd_pcm_info_set_stream( pcminfo, stream );
7376
  if ( subdevice != -1 ) {
7377
    snd_pcm_info_set_device( pcminfo, subdevice );
7378
    snd_pcm_info_set_subdevice( pcminfo, 0 );
7379
 
7380
    result = snd_ctl_pcm_info( chandle, pcminfo );
7381
    if ( result < 0 ) {
7382
      // Device probably doesn't support playback.
7383
      goto captureProbe;
7384
    }
7385
  }
7386
 
7387
  result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7388
  if ( result < 0 ) {
7389
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7390
    errorText_ = errorStream_.str();
7391
    error( RtAudioError::WARNING );
7392
    goto captureProbe;
7393
  }
7394
 
7395
  // The device is open ... fill the parameter structure.
7396
  result = snd_pcm_hw_params_any( phandle, params );
7397
  if ( result < 0 ) {
7398
    snd_pcm_close( phandle );
7399
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7400
    errorText_ = errorStream_.str();
7401
    error( RtAudioError::WARNING );
7402
    goto captureProbe;
7403
  }
7404
 
7405
  // Get output channel information.
7406
  unsigned int value;
7407
  result = snd_pcm_hw_params_get_channels_max( params, &value );
7408
  if ( result < 0 ) {
7409
    snd_pcm_close( phandle );
7410
    errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7411
    errorText_ = errorStream_.str();
7412
    error( RtAudioError::WARNING );
7413
    goto captureProbe;
7414
  }
7415
  info.outputChannels = value;
7416
  snd_pcm_close( phandle );
7417
 
7418
 captureProbe:
7419
  stream = SND_PCM_STREAM_CAPTURE;
7420
  snd_pcm_info_set_stream( pcminfo, stream );
7421
 
7422
  // Now try for capture unless default device (with subdev = -1)
7423
  if ( subdevice != -1 ) {
7424
    result = snd_ctl_pcm_info( chandle, pcminfo );
7425
    snd_ctl_close( chandle );
7426
    if ( result < 0 ) {
7427
      // Device probably doesn't support capture.
7428
      if ( info.outputChannels == 0 ) return info;
7429
      goto probeParameters;
7430
    }
7431
  }
7432
  else
7433
    snd_ctl_close( chandle );
7434
 
7435
  result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7436
  if ( result < 0 ) {
7437
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7438
    errorText_ = errorStream_.str();
7439
    error( RtAudioError::WARNING );
7440
    if ( info.outputChannels == 0 ) return info;
7441
    goto probeParameters;
7442
  }
7443
 
7444
  // The device is open ... fill the parameter structure.
7445
  result = snd_pcm_hw_params_any( phandle, params );
7446
  if ( result < 0 ) {
7447
    snd_pcm_close( phandle );
7448
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7449
    errorText_ = errorStream_.str();
7450
    error( RtAudioError::WARNING );
7451
    if ( info.outputChannels == 0 ) return info;
7452
    goto probeParameters;
7453
  }
7454
 
7455
  result = snd_pcm_hw_params_get_channels_max( params, &value );
7456
  if ( result < 0 ) {
7457
    snd_pcm_close( phandle );
7458
    errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7459
    errorText_ = errorStream_.str();
7460
    error( RtAudioError::WARNING );
7461
    if ( info.outputChannels == 0 ) return info;
7462
    goto probeParameters;
7463
  }
7464
  info.inputChannels = value;
7465
  snd_pcm_close( phandle );
7466
 
7467
  // If device opens for both playback and capture, we determine the channels.
7468
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
7469
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7470
 
7471
  // ALSA doesn't provide default devices so we'll use the first available one.
7472
  if ( device == 0 && info.outputChannels > 0 )
7473
    info.isDefaultOutput = true;
7474
  if ( device == 0 && info.inputChannels > 0 )
7475
    info.isDefaultInput = true;
7476
 
7477
 probeParameters:
7478
  // At this point, we just need to figure out the supported data
7479
  // formats and sample rates.  We'll proceed by opening the device in
7480
  // the direction with the maximum number of channels, or playback if
7481
  // they are equal.  This might limit our sample rate options, but so
7482
  // be it.
7483
 
7484
  if ( info.outputChannels >= info.inputChannels )
7485
    stream = SND_PCM_STREAM_PLAYBACK;
7486
  else
7487
    stream = SND_PCM_STREAM_CAPTURE;
7488
  snd_pcm_info_set_stream( pcminfo, stream );
7489
 
7490
  result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7491
  if ( result < 0 ) {
7492
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7493
    errorText_ = errorStream_.str();
7494
    error( RtAudioError::WARNING );
7495
    return info;
7496
  }
7497
 
7498
  // The device is open ... fill the parameter structure.
7499
  result = snd_pcm_hw_params_any( phandle, params );
7500
  if ( result < 0 ) {
7501
    snd_pcm_close( phandle );
7502
    errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7503
    errorText_ = errorStream_.str();
7504
    error( RtAudioError::WARNING );
7505
    return info;
7506
  }
7507
 
7508
  // Test our discrete set of sample rate values.
7509
  info.sampleRates.clear();
7510
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7511
    if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7512
      info.sampleRates.push_back( SAMPLE_RATES[i] );
7513
 
7514
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7515
        info.preferredSampleRate = SAMPLE_RATES[i];
7516
    }
7517
  }
7518
  if ( info.sampleRates.size() == 0 ) {
7519
    snd_pcm_close( phandle );
7520
    errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7521
    errorText_ = errorStream_.str();
7522
    error( RtAudioError::WARNING );
7523
    return info;
7524
  }
7525
 
7526
  // Probe the supported data formats ... we don't care about endian-ness just yet
7527
  snd_pcm_format_t format;
7528
  info.nativeFormats = 0;
7529
  format = SND_PCM_FORMAT_S8;
7530
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7531
    info.nativeFormats |= RTAUDIO_SINT8;
7532
  format = SND_PCM_FORMAT_S16;
7533
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7534
    info.nativeFormats |= RTAUDIO_SINT16;
7535
  format = SND_PCM_FORMAT_S24;
7536
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7537
    info.nativeFormats |= RTAUDIO_SINT24;
7538
  format = SND_PCM_FORMAT_S32;
7539
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7540
    info.nativeFormats |= RTAUDIO_SINT32;
7541
  format = SND_PCM_FORMAT_FLOAT;
7542
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7543
    info.nativeFormats |= RTAUDIO_FLOAT32;
7544
  format = SND_PCM_FORMAT_FLOAT64;
7545
  if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7546
    info.nativeFormats |= RTAUDIO_FLOAT64;
7547
 
7548
  // Check that we have at least one supported format
7549
  if ( info.nativeFormats == 0 ) {
7550
    snd_pcm_close( phandle );
7551
    errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7552
    errorText_ = errorStream_.str();
7553
    error( RtAudioError::WARNING );
7554
    return info;
7555
  }
7556
 
7557
  // Get the device name
7558
  if (strncmp(name, "default", 7)!=0) {
7559
    char *cardname;
7560
    result = snd_card_get_name( card, &cardname );
7561
    if ( result >= 0 ) {
7562
      sprintf( name, "hw:%s,%d", cardname, subdevice );
7563
      free( cardname );
7564
    }
7565
  }
7566
  info.name = name;
7567
 
7568
  // That's all ... close the device and return
7569
  snd_pcm_close( phandle );
7570
  info.probed = true;
7571
  return info;
7572
}
7573
 
7574
void RtApiAlsa :: saveDeviceInfo( void )
7575
{
7576
  devices_.clear();
7577
 
7578
  unsigned int nDevices = getDeviceCount();
7579
  devices_.resize( nDevices );
7580
  for ( unsigned int i=0; i<nDevices; i++ )
7581
    devices_[i] = getDeviceInfo( i );
7582
}
7583
 
7584
bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7585
                                   unsigned int firstChannel, unsigned int sampleRate,
7586
                                   RtAudioFormat format, unsigned int *bufferSize,
7587
                                   RtAudio::StreamOptions *options )
7588
 
7589
{
7590
#if defined(__RTAUDIO_DEBUG__)
7591
  struct SndOutputTdealloc {
7592
    SndOutputTdealloc() : _out(NULL) { snd_output_stdio_attach(&_out, stderr, 0); }
7593
    ~SndOutputTdealloc() { snd_output_close(_out); }
7594
    operator snd_output_t*() { return _out; }
7595
    snd_output_t *_out;
7596
  } out;
7597
#endif
7598
 
7599
  // I'm not using the "plug" interface ... too much inconsistent behavior.
7600
 
7601
  unsigned nDevices = 0;
7602
  int result, subdevice, card;
7603
  char name[64];
7604
  snd_ctl_t *chandle;
7605
 
7606
  if ( device == 0
7607
       || (options && options->flags & RTAUDIO_ALSA_USE_DEFAULT) )
7608
  {
7609
    strcpy(name, "default");
7610
    result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7611
    if ( result == 0 ) {
7612
      if ( nDevices == device ) {
7613
        strcpy( name, "default" );
7614
        snd_ctl_close( chandle );
7615
        goto foundDevice;
7616
      }
7617
      nDevices++;
7618
    }
7619
  }
7620
 
7621
  else {
7622
    nDevices++;
7623
    // Count cards and devices
7624
    card = -1;
7625
    snd_card_next( &card );
7626
    while ( card >= 0 ) {
7627
      sprintf( name, "hw:%d", card );
7628
      result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7629
      if ( result < 0 ) {
7630
        errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7631
        errorText_ = errorStream_.str();
7632
        return FAILURE;
7633
      }
7634
      subdevice = -1;
7635
      while( 1 ) {
7636
        result = snd_ctl_pcm_next_device( chandle, &subdevice );
7637
        if ( result < 0 ) break;
7638
        if ( subdevice < 0 ) break;
7639
        if ( nDevices == device ) {
7640
          sprintf( name, "hw:%d,%d", card, subdevice );
7641
          snd_ctl_close( chandle );
7642
          goto foundDevice;
7643
        }
7644
        nDevices++;
7645
      }
7646
      snd_ctl_close( chandle );
7647
      snd_card_next( &card );
7648
    }
7649
 
7650
    if ( nDevices == 0 ) {
7651
      // This should not happen because a check is made before this function is called.
7652
      errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7653
      return FAILURE;
7654
    }
7655
 
7656
    if ( device >= nDevices ) {
7657
      // This should not happen because a check is made before this function is called.
7658
      errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7659
      return FAILURE;
7660
    }
7661
  }
7662
 
7663
 foundDevice:
7664
 
7665
  // The getDeviceInfo() function will not work for a device that is
7666
  // already open.  Thus, we'll probe the system before opening a
7667
  // stream and save the results for use by getDeviceInfo().
7668
  if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7669
    this->saveDeviceInfo();
7670
 
7671
  snd_pcm_stream_t stream;
7672
  if ( mode == OUTPUT )
7673
    stream = SND_PCM_STREAM_PLAYBACK;
7674
  else
7675
    stream = SND_PCM_STREAM_CAPTURE;
7676
 
7677
  snd_pcm_t *phandle;
7678
  int openMode = SND_PCM_ASYNC;
7679
  result = snd_pcm_open( &phandle, name, stream, openMode );
7680
  if ( result < 0 ) {
7681
    if ( mode == OUTPUT )
7682
      errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7683
    else
7684
      errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7685
    errorText_ = errorStream_.str();
7686
    return FAILURE;
7687
  }
7688
 
7689
  // Fill the parameter structure.
7690
  snd_pcm_hw_params_t *hw_params;
7691
  snd_pcm_hw_params_alloca( &hw_params );
7692
  result = snd_pcm_hw_params_any( phandle, hw_params );
7693
  if ( result < 0 ) {
7694
    snd_pcm_close( phandle );
7695
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7696
    errorText_ = errorStream_.str();
7697
    return FAILURE;
7698
  }
7699
 
7700
#if defined(__RTAUDIO_DEBUG__)
7701
  fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7702
  snd_pcm_hw_params_dump( hw_params, out );
7703
#endif
7704
 
7705
  // Set access ... check user preference.
7706
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7707
    stream_.userInterleaved = false;
7708
    result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7709
    if ( result < 0 ) {
7710
      result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7711
      stream_.deviceInterleaved[mode] =  true;
7712
    }
7713
    else
7714
      stream_.deviceInterleaved[mode] = false;
7715
  }
7716
  else {
7717
    stream_.userInterleaved = true;
7718
    result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7719
    if ( result < 0 ) {
7720
      result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7721
      stream_.deviceInterleaved[mode] =  false;
7722
    }
7723
    else
7724
      stream_.deviceInterleaved[mode] =  true;
7725
  }
7726
 
7727
  if ( result < 0 ) {
7728
    snd_pcm_close( phandle );
7729
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7730
    errorText_ = errorStream_.str();
7731
    return FAILURE;
7732
  }
7733
 
7734
  // Determine how to set the device format.
7735
  stream_.userFormat = format;
7736
  snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7737
 
7738
  if ( format == RTAUDIO_SINT8 )
7739
    deviceFormat = SND_PCM_FORMAT_S8;
7740
  else if ( format == RTAUDIO_SINT16 )
7741
    deviceFormat = SND_PCM_FORMAT_S16;
7742
  else if ( format == RTAUDIO_SINT24 )
7743
    deviceFormat = SND_PCM_FORMAT_S24;
7744
  else if ( format == RTAUDIO_SINT32 )
7745
    deviceFormat = SND_PCM_FORMAT_S32;
7746
  else if ( format == RTAUDIO_FLOAT32 )
7747
    deviceFormat = SND_PCM_FORMAT_FLOAT;
7748
  else if ( format == RTAUDIO_FLOAT64 )
7749
    deviceFormat = SND_PCM_FORMAT_FLOAT64;
7750
 
7751
  if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7752
    stream_.deviceFormat[mode] = format;
7753
    goto setFormat;
7754
  }
7755
 
7756
  // The user requested format is not natively supported by the device.
7757
  deviceFormat = SND_PCM_FORMAT_FLOAT64;
7758
  if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7759
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7760
    goto setFormat;
7761
  }
7762
 
7763
  deviceFormat = SND_PCM_FORMAT_FLOAT;
7764
  if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7765
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7766
    goto setFormat;
7767
  }
7768
 
7769
  deviceFormat = SND_PCM_FORMAT_S32;
7770
  if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7771
    stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7772
    goto setFormat;
7773
  }
7774
 
7775
  deviceFormat = SND_PCM_FORMAT_S24;
7776
  if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7777
    stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7778
    goto setFormat;
7779
  }
7780
 
7781
  deviceFormat = SND_PCM_FORMAT_S16;
7782
  if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7783
    stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7784
    goto setFormat;
7785
  }
7786
 
7787
  deviceFormat = SND_PCM_FORMAT_S8;
7788
  if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7789
    stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7790
    goto setFormat;
7791
  }
7792
 
7793
  // If we get here, no supported format was found.
7794
  snd_pcm_close( phandle );
7795
  errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7796
  errorText_ = errorStream_.str();
7797
  return FAILURE;
7798
 
7799
 setFormat:
7800
  result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7801
  if ( result < 0 ) {
7802
    snd_pcm_close( phandle );
7803
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7804
    errorText_ = errorStream_.str();
7805
    return FAILURE;
7806
  }
7807
 
7808
  // Determine whether byte-swaping is necessary.
7809
  stream_.doByteSwap[mode] = false;
7810
  if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7811
    result = snd_pcm_format_cpu_endian( deviceFormat );
7812
    if ( result == 0 )
7813
      stream_.doByteSwap[mode] = true;
7814
    else if (result < 0) {
7815
      snd_pcm_close( phandle );
7816
      errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7817
      errorText_ = errorStream_.str();
7818
      return FAILURE;
7819
    }
7820
  }
7821
 
7822
  // Set the sample rate.
7823
  result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7824
  if ( result < 0 ) {
7825
    snd_pcm_close( phandle );
7826
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7827
    errorText_ = errorStream_.str();
7828
    return FAILURE;
7829
  }
7830
 
7831
  // Determine the number of channels for this device.  We support a possible
7832
  // minimum device channel number > than the value requested by the user.
7833
  stream_.nUserChannels[mode] = channels;
7834
  unsigned int value;
7835
  result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7836
  unsigned int deviceChannels = value;
7837
  if ( result < 0 || deviceChannels < channels + firstChannel ) {
7838
    snd_pcm_close( phandle );
7839
    errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7840
    errorText_ = errorStream_.str();
7841
    return FAILURE;
7842
  }
7843
 
7844
  result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7845
  if ( result < 0 ) {
7846
    snd_pcm_close( phandle );
7847
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7848
    errorText_ = errorStream_.str();
7849
    return FAILURE;
7850
  }
7851
  deviceChannels = value;
7852
  if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7853
  stream_.nDeviceChannels[mode] = deviceChannels;
7854
 
7855
  // Set the device channels.
7856
  result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7857
  if ( result < 0 ) {
7858
    snd_pcm_close( phandle );
7859
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7860
    errorText_ = errorStream_.str();
7861
    return FAILURE;
7862
  }
7863
 
7864
  // Set the buffer (or period) size.
7865
  int dir = 0;
7866
  snd_pcm_uframes_t periodSize = *bufferSize;
7867
  result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7868
  if ( result < 0 ) {
7869
    snd_pcm_close( phandle );
7870
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7871
    errorText_ = errorStream_.str();
7872
    return FAILURE;
7873
  }
7874
  *bufferSize = periodSize;
7875
 
7876
  // Set the buffer number, which in ALSA is referred to as the "period".
7877
  unsigned int periods = 0;
7878
  if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7879
  if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7880
  if ( periods < 2 ) periods = 4; // a fairly safe default value
7881
  result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7882
  if ( result < 0 ) {
7883
    snd_pcm_close( phandle );
7884
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7885
    errorText_ = errorStream_.str();
7886
    return FAILURE;
7887
  }
7888
 
7889
  // If attempting to setup a duplex stream, the bufferSize parameter
7890
  // MUST be the same in both directions!
7891
  if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7892
    snd_pcm_close( phandle );
7893
    errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7894
    errorText_ = errorStream_.str();
7895
    return FAILURE;
7896
  }
7897
 
7898
  stream_.bufferSize = *bufferSize;
7899
 
7900
  // Install the hardware configuration
7901
  result = snd_pcm_hw_params( phandle, hw_params );
7902
  if ( result < 0 ) {
7903
    snd_pcm_close( phandle );
7904
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7905
    errorText_ = errorStream_.str();
7906
    return FAILURE;
7907
  }
7908
 
7909
#if defined(__RTAUDIO_DEBUG__)
7910
  fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7911
  snd_pcm_hw_params_dump( hw_params, out );
7912
#endif
7913
 
7914
  // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7915
  snd_pcm_sw_params_t *sw_params = NULL;
7916
  snd_pcm_sw_params_alloca( &sw_params );
7917
  snd_pcm_sw_params_current( phandle, sw_params );
7918
  snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7919
  snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7920
  snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7921
 
7922
  // The following two settings were suggested by Theo Veenker
7923
  //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7924
  //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7925
 
7926
  // here are two options for a fix
7927
  //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7928
  snd_pcm_uframes_t val;
7929
  snd_pcm_sw_params_get_boundary( sw_params, &val );
7930
  snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7931
 
7932
  result = snd_pcm_sw_params( phandle, sw_params );
7933
  if ( result < 0 ) {
7934
    snd_pcm_close( phandle );
7935
    errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7936
    errorText_ = errorStream_.str();
7937
    return FAILURE;
7938
  }
7939
 
7940
#if defined(__RTAUDIO_DEBUG__)
7941
  fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7942
  snd_pcm_sw_params_dump( sw_params, out );
7943
#endif
7944
 
7945
  // Set flags for buffer conversion
7946
  stream_.doConvertBuffer[mode] = false;
7947
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
7948
    stream_.doConvertBuffer[mode] = true;
7949
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7950
    stream_.doConvertBuffer[mode] = true;
7951
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7952
       stream_.nUserChannels[mode] > 1 )
7953
    stream_.doConvertBuffer[mode] = true;
7954
 
7955
  // Allocate the ApiHandle if necessary and then save.
7956
  AlsaHandle *apiInfo = 0;
7957
  if ( stream_.apiHandle == 0 ) {
7958
    try {
7959
      apiInfo = (AlsaHandle *) new AlsaHandle;
7960
    }
7961
    catch ( std::bad_alloc& ) {
7962
      errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7963
      goto error;
7964
    }
7965
 
7966
    if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7967
      errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7968
      goto error;
7969
    }
7970
 
7971
    stream_.apiHandle = (void *) apiInfo;
7972
    apiInfo->handles[0] = 0;
7973
    apiInfo->handles[1] = 0;
7974
  }
7975
  else {
7976
    apiInfo = (AlsaHandle *) stream_.apiHandle;
7977
  }
7978
  apiInfo->handles[mode] = phandle;
7979
  phandle = 0;
7980
 
7981
  // Allocate necessary internal buffers.
7982
  unsigned long bufferBytes;
7983
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7984
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7985
  if ( stream_.userBuffer[mode] == NULL ) {
7986
    errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7987
    goto error;
7988
  }
7989
 
7990
  if ( stream_.doConvertBuffer[mode] ) {
7991
 
7992
    bool makeBuffer = true;
7993
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7994
    if ( mode == INPUT ) {
7995
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7996
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7997
        if ( bufferBytes <= bytesOut ) makeBuffer = false;
7998
      }
7999
    }
8000
 
8001
    if ( makeBuffer ) {
8002
      bufferBytes *= *bufferSize;
8003
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8004
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8005
      if ( stream_.deviceBuffer == NULL ) {
8006
        errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
8007
        goto error;
8008
      }
8009
    }
8010
  }
8011
 
8012
  stream_.sampleRate = sampleRate;
8013
  stream_.nBuffers = periods;
8014
  stream_.device[mode] = device;
8015
  stream_.state = STREAM_STOPPED;
8016
 
8017
  // Setup the buffer conversion information structure.
8018
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8019
 
8020
  // Setup thread if necessary.
8021
  if ( stream_.mode == OUTPUT && mode == INPUT ) {
8022
    // We had already set up an output stream.
8023
    stream_.mode = DUPLEX;
8024
    // Link the streams if possible.
8025
    apiInfo->synchronized = false;
8026
    if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
8027
      apiInfo->synchronized = true;
8028
    else {
8029
      errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
8030
      error( RtAudioError::WARNING );
8031
    }
8032
  }
8033
  else {
8034
    stream_.mode = mode;
8035
 
8036
    // Setup callback thread.
8037
    stream_.callbackInfo.object = (void *) this;
8038
 
8039
    // Set the thread attributes for joinable and realtime scheduling
8040
    // priority (optional).  The higher priority will only take affect
8041
    // if the program is run as root or suid. Note, under Linux
8042
    // processes with CAP_SYS_NICE privilege, a user can change
8043
    // scheduling policy and priority (thus need not be root). See
8044
    // POSIX "capabilities".
8045
    pthread_attr_t attr;
8046
    pthread_attr_init( &attr );
8047
    pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8048
#ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8049
    if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8050
      stream_.callbackInfo.doRealtime = true;
8051
      struct sched_param param;
8052
      int priority = options->priority;
8053
      int min = sched_get_priority_min( SCHED_RR );
8054
      int max = sched_get_priority_max( SCHED_RR );
8055
      if ( priority < min ) priority = min;
8056
      else if ( priority > max ) priority = max;
8057
      param.sched_priority = priority;
8058
 
8059
      // Set the policy BEFORE the priority. Otherwise it fails.
8060
      pthread_attr_setschedpolicy(&attr, SCHED_RR);
8061
      pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8062
      // This is definitely required. Otherwise it fails.
8063
      pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8064
      pthread_attr_setschedparam(&attr, &param);
8065
    }
8066
    else
8067
      pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8068
#else
8069
    pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8070
#endif
8071
 
8072
    stream_.callbackInfo.isRunning = true;
8073
    result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
8074
    pthread_attr_destroy( &attr );
8075
    if ( result ) {
8076
      // Failed. Try instead with default attributes.
8077
      result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
8078
      if ( result ) {
8079
        stream_.callbackInfo.isRunning = false;
8080
        errorText_ = "RtApiAlsa::error creating callback thread!";
8081
        goto error;
8082
      }
8083
    }
8084
  }
8085
 
8086
  return SUCCESS;
8087
 
8088
 error:
8089
  if ( apiInfo ) {
8090
    pthread_cond_destroy( &apiInfo->runnable_cv );
8091
    if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8092
    if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8093
    delete apiInfo;
8094
    stream_.apiHandle = 0;
8095
  }
8096
 
8097
  if ( phandle) snd_pcm_close( phandle );
8098
 
8099
  for ( int i=0; i<2; i++ ) {
8100
    if ( stream_.userBuffer[i] ) {
8101
      free( stream_.userBuffer[i] );
8102
      stream_.userBuffer[i] = 0;
8103
    }
8104
  }
8105
 
8106
  if ( stream_.deviceBuffer ) {
8107
    free( stream_.deviceBuffer );
8108
    stream_.deviceBuffer = 0;
8109
  }
8110
 
8111
  stream_.state = STREAM_CLOSED;
8112
  return FAILURE;
8113
}
8114
 
8115
void RtApiAlsa :: closeStream()
8116
{
8117
  if ( stream_.state == STREAM_CLOSED ) {
8118
    errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8119
    error( RtAudioError::WARNING );
8120
    return;
8121
  }
8122
 
8123
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8124
  stream_.callbackInfo.isRunning = false;
8125
  MUTEX_LOCK( &stream_.mutex );
8126
  if ( stream_.state == STREAM_STOPPED ) {
8127
    apiInfo->runnable = true;
8128
    pthread_cond_signal( &apiInfo->runnable_cv );
8129
  }
8130
  MUTEX_UNLOCK( &stream_.mutex );
8131
  pthread_join( stream_.callbackInfo.thread, NULL );
8132
 
8133
  if ( stream_.state == STREAM_RUNNING ) {
8134
    stream_.state = STREAM_STOPPED;
8135
    if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8136
      snd_pcm_drop( apiInfo->handles[0] );
8137
    if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8138
      snd_pcm_drop( apiInfo->handles[1] );
8139
  }
8140
 
8141
  if ( apiInfo ) {
8142
    pthread_cond_destroy( &apiInfo->runnable_cv );
8143
    if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8144
    if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8145
    delete apiInfo;
8146
    stream_.apiHandle = 0;
8147
  }
8148
 
8149
  for ( int i=0; i<2; i++ ) {
8150
    if ( stream_.userBuffer[i] ) {
8151
      free( stream_.userBuffer[i] );
8152
      stream_.userBuffer[i] = 0;
8153
    }
8154
  }
8155
 
8156
  if ( stream_.deviceBuffer ) {
8157
    free( stream_.deviceBuffer );
8158
    stream_.deviceBuffer = 0;
8159
  }
8160
 
8161
  stream_.mode = UNINITIALIZED;
8162
  stream_.state = STREAM_CLOSED;
8163
}
8164
 
8165
void RtApiAlsa :: startStream()
8166
{
8167
  // This method calls snd_pcm_prepare if the device isn't already in that state.
8168
 
8169
  verifyStream();
8170
  if ( stream_.state == STREAM_RUNNING ) {
8171
    errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8172
    error( RtAudioError::WARNING );
8173
    return;
8174
  }
8175
 
8176
  MUTEX_LOCK( &stream_.mutex );
8177
 
8178
  #if defined( HAVE_GETTIMEOFDAY )
8179
  gettimeofday( &stream_.lastTickTimestamp, NULL );
8180
  #endif
8181
 
8182
  int result = 0;
8183
  snd_pcm_state_t state;
8184
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8185
  snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8186
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8187
    state = snd_pcm_state( handle[0] );
8188
    if ( state != SND_PCM_STATE_PREPARED ) {
8189
      result = snd_pcm_prepare( handle[0] );
8190
      if ( result < 0 ) {
8191
        errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8192
        errorText_ = errorStream_.str();
8193
        goto unlock;
8194
      }
8195
    }
8196
  }
8197
 
8198
  if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8199
    result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8200
    state = snd_pcm_state( handle[1] );
8201
    if ( state != SND_PCM_STATE_PREPARED ) {
8202
      result = snd_pcm_prepare( handle[1] );
8203
      if ( result < 0 ) {
8204
        errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8205
        errorText_ = errorStream_.str();
8206
        goto unlock;
8207
      }
8208
    }
8209
  }
8210
 
8211
  stream_.state = STREAM_RUNNING;
8212
 
8213
 unlock:
8214
  apiInfo->runnable = true;
8215
  pthread_cond_signal( &apiInfo->runnable_cv );
8216
  MUTEX_UNLOCK( &stream_.mutex );
8217
 
8218
  if ( result >= 0 ) return;
8219
  error( RtAudioError::SYSTEM_ERROR );
8220
}
8221
 
8222
void RtApiAlsa :: stopStream()
8223
{
8224
  verifyStream();
8225
  if ( stream_.state == STREAM_STOPPED ) {
8226
    errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8227
    error( RtAudioError::WARNING );
8228
    return;
8229
  }
8230
 
8231
  stream_.state = STREAM_STOPPED;
8232
  MUTEX_LOCK( &stream_.mutex );
8233
 
8234
  int result = 0;
8235
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8236
  snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8237
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8238
    if ( apiInfo->synchronized )
8239
      result = snd_pcm_drop( handle[0] );
8240
    else
8241
      result = snd_pcm_drain( handle[0] );
8242
    if ( result < 0 ) {
8243
      errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8244
      errorText_ = errorStream_.str();
8245
      goto unlock;
8246
    }
8247
  }
8248
 
8249
  if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8250
    result = snd_pcm_drop( handle[1] );
8251
    if ( result < 0 ) {
8252
      errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8253
      errorText_ = errorStream_.str();
8254
      goto unlock;
8255
    }
8256
  }
8257
 
8258
 unlock:
8259
  apiInfo->runnable = false; // fixes high CPU usage when stopped
8260
  MUTEX_UNLOCK( &stream_.mutex );
8261
 
8262
  if ( result >= 0 ) return;
8263
  error( RtAudioError::SYSTEM_ERROR );
8264
}
8265
 
8266
void RtApiAlsa :: abortStream()
8267
{
8268
  verifyStream();
8269
  if ( stream_.state == STREAM_STOPPED ) {
8270
    errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8271
    error( RtAudioError::WARNING );
8272
    return;
8273
  }
8274
 
8275
  stream_.state = STREAM_STOPPED;
8276
  MUTEX_LOCK( &stream_.mutex );
8277
 
8278
  int result = 0;
8279
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8280
  snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8281
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8282
    result = snd_pcm_drop( handle[0] );
8283
    if ( result < 0 ) {
8284
      errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8285
      errorText_ = errorStream_.str();
8286
      goto unlock;
8287
    }
8288
  }
8289
 
8290
  if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8291
    result = snd_pcm_drop( handle[1] );
8292
    if ( result < 0 ) {
8293
      errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8294
      errorText_ = errorStream_.str();
8295
      goto unlock;
8296
    }
8297
  }
8298
 
8299
 unlock:
8300
  apiInfo->runnable = false; // fixes high CPU usage when stopped
8301
  MUTEX_UNLOCK( &stream_.mutex );
8302
 
8303
  if ( result >= 0 ) return;
8304
  error( RtAudioError::SYSTEM_ERROR );
8305
}
8306
 
8307
void RtApiAlsa :: callbackEvent()
8308
{
8309
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8310
  if ( stream_.state == STREAM_STOPPED ) {
8311
    MUTEX_LOCK( &stream_.mutex );
8312
    while ( !apiInfo->runnable )
8313
      pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8314
 
8315
    if ( stream_.state != STREAM_RUNNING ) {
8316
      MUTEX_UNLOCK( &stream_.mutex );
8317
      return;
8318
    }
8319
    MUTEX_UNLOCK( &stream_.mutex );
8320
  }
8321
 
8322
  if ( stream_.state == STREAM_CLOSED ) {
8323
    errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8324
    error( RtAudioError::WARNING );
8325
    return;
8326
  }
8327
 
8328
  int doStopStream = 0;
8329
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8330
  double streamTime = getStreamTime();
8331
  RtAudioStreamStatus status = 0;
8332
  if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8333
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
8334
    apiInfo->xrun[0] = false;
8335
  }
8336
  if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8337
    status |= RTAUDIO_INPUT_OVERFLOW;
8338
    apiInfo->xrun[1] = false;
8339
  }
8340
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8341
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8342
 
8343
  if ( doStopStream == 2 ) {
8344
    abortStream();
8345
    return;
8346
  }
8347
 
8348
  MUTEX_LOCK( &stream_.mutex );
8349
 
8350
  // The state might change while waiting on a mutex.
8351
  if ( stream_.state == STREAM_STOPPED ) goto unlock;
8352
 
8353
  int result;
8354
  char *buffer;
8355
  int channels;
8356
  snd_pcm_t **handle;
8357
  snd_pcm_sframes_t frames;
8358
  RtAudioFormat format;
8359
  handle = (snd_pcm_t **) apiInfo->handles;
8360
 
8361
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8362
 
8363
    // Setup parameters.
8364
    if ( stream_.doConvertBuffer[1] ) {
8365
      buffer = stream_.deviceBuffer;
8366
      channels = stream_.nDeviceChannels[1];
8367
      format = stream_.deviceFormat[1];
8368
    }
8369
    else {
8370
      buffer = stream_.userBuffer[1];
8371
      channels = stream_.nUserChannels[1];
8372
      format = stream_.userFormat;
8373
    }
8374
 
8375
    // Read samples from device in interleaved/non-interleaved format.
8376
    if ( stream_.deviceInterleaved[1] )
8377
      result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
8378
    else {
8379
      void *bufs[channels];
8380
      size_t offset = stream_.bufferSize * formatBytes( format );
8381
      for ( int i=0; i<channels; i++ )
8382
        bufs[i] = (void *) (buffer + (i * offset));
8383
      result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8384
    }
8385
 
8386
    if ( result < (int) stream_.bufferSize ) {
8387
      // Either an error or overrun occurred.
8388
      if ( result == -EPIPE ) {
8389
        snd_pcm_state_t state = snd_pcm_state( handle[1] );
8390
        if ( state == SND_PCM_STATE_XRUN ) {
8391
          apiInfo->xrun[1] = true;
8392
          result = snd_pcm_prepare( handle[1] );
8393
          if ( result < 0 ) {
8394
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8395
            errorText_ = errorStream_.str();
8396
          }
8397
        }
8398
        else {
8399
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8400
          errorText_ = errorStream_.str();
8401
        }
8402
      }
8403
      else {
8404
        errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8405
        errorText_ = errorStream_.str();
8406
      }
8407
      error( RtAudioError::WARNING );
8408
      goto tryOutput;
8409
    }
8410
 
8411
    // Do byte swapping if necessary.
8412
    if ( stream_.doByteSwap[1] )
8413
      byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8414
 
8415
    // Do buffer conversion if necessary.
8416
    if ( stream_.doConvertBuffer[1] )
8417
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8418
 
8419
    // Check stream latency
8420
    result = snd_pcm_delay( handle[1], &frames );
8421
    if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
8422
  }
8423
 
8424
 tryOutput:
8425
 
8426
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8427
 
8428
    // Setup parameters and do buffer conversion if necessary.
8429
    if ( stream_.doConvertBuffer[0] ) {
8430
      buffer = stream_.deviceBuffer;
8431
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8432
      channels = stream_.nDeviceChannels[0];
8433
      format = stream_.deviceFormat[0];
8434
    }
8435
    else {
8436
      buffer = stream_.userBuffer[0];
8437
      channels = stream_.nUserChannels[0];
8438
      format = stream_.userFormat;
8439
    }
8440
 
8441
    // Do byte swapping if necessary.
8442
    if ( stream_.doByteSwap[0] )
8443
      byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8444
 
8445
    // Write samples to device in interleaved/non-interleaved format.
8446
    if ( stream_.deviceInterleaved[0] )
8447
      result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8448
    else {
8449
      void *bufs[channels];
8450
      size_t offset = stream_.bufferSize * formatBytes( format );
8451
      for ( int i=0; i<channels; i++ )
8452
        bufs[i] = (void *) (buffer + (i * offset));
8453
      result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8454
    }
8455
 
8456
    if ( result < (int) stream_.bufferSize ) {
8457
      // Either an error or underrun occurred.
8458
      if ( result == -EPIPE ) {
8459
        snd_pcm_state_t state = snd_pcm_state( handle[0] );
8460
        if ( state == SND_PCM_STATE_XRUN ) {
8461
          apiInfo->xrun[0] = true;
8462
          result = snd_pcm_prepare( handle[0] );
8463
          if ( result < 0 ) {
8464
            errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8465
            errorText_ = errorStream_.str();
8466
          }
8467
          else
8468
            errorText_ =  "RtApiAlsa::callbackEvent: audio write error, underrun.";
8469
        }
8470
        else {
8471
          errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8472
          errorText_ = errorStream_.str();
8473
        }
8474
      }
8475
      else {
8476
        errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8477
        errorText_ = errorStream_.str();
8478
      }
8479
      error( RtAudioError::WARNING );
8480
      goto unlock;
8481
    }
8482
 
8483
    // Check stream latency
8484
    result = snd_pcm_delay( handle[0], &frames );
8485
    if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8486
  }
8487
 
8488
 unlock:
8489
  MUTEX_UNLOCK( &stream_.mutex );
8490
 
8491
  RtApi::tickStreamTime();
8492
  if ( doStopStream == 1 ) this->stopStream();
8493
}
8494
 
8495
// Thread entry point for the ALSA callback loop.  The CallbackInfo
// carries the RtApiAlsa instance and the shared isRunning flag; the
// loop services one buffer per iteration via callbackEvent() until the
// flag is cleared (stream stopped/closed) or the thread is cancelled
// at the pthread_testcancel() cancellation point.
static void *alsaCallbackHandler( void *ptr )
{
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiAlsa *object = (RtApiAlsa *) info->object;
  bool *isRunning = &info->isRunning;

#ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
  // Report whether the realtime (SCHED_RR) scheduling requested at
  // stream-open time actually took effect for this thread.
  if ( info->doRealtime ) {
    std::cerr << "RtAudio alsa: " <<
             (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
             "running realtime scheduling" << std::endl;
  }
#endif

  while ( *isRunning == true ) {
    pthread_testcancel(); // explicit cancellation point for closeStream()
    object->callbackEvent();
  }

  pthread_exit( NULL );
}
8516
 
8517
//******************** End of __LINUX_ALSA__ *********************//
8518
#endif
8519
 
8520
#if defined(__LINUX_PULSE__)
8521
 
8522
// Code written by Peter Meerwald, pmeerw@pmeerw.net
8523
// and Tristan Matthews.
8524
 
8525
#include <pulse/error.h>
8526
#include <pulse/simple.h>
8527
#include <pulse/pulseaudio.h>
8528
#include <cstdio>
8529
 
8530
static pa_mainloop_api *rt_pa_mainloop_api = NULL;
8531
// Aggregates what this backend knows about one PulseAudio device: the
// PulseAudio sink (output) and source (input) indices/names, plus the
// generic RtAudio::DeviceInfo record exposed to API users.  An index
// of -1 means no sink/source of that kind has been matched yet.
struct PaDeviceInfo {
  PaDeviceInfo() : sink_index(-1), source_index(-1) {}
  int sink_index;           // PulseAudio sink index, or -1 if none
  int source_index;         // PulseAudio source index, or -1 if none
  std::string sink_name;    // PulseAudio internal sink name
  std::string source_name;  // PulseAudio internal source name
  RtAudio::DeviceInfo info; // record returned to the API user
};
8539
// File-scope cache filled by the PulseAudio mainloop callbacks below
// (rt_pa_set_server_info / rt_pa_set_sink_info /
// rt_pa_set_source_info_and_quit) and read by the RtApiPulse methods.
static struct {
  std::vector<PaDeviceInfo> dev;   // discovered devices; default device kept first
  std::string default_sink_name;   // server's default output sink
  std::string default_source_name; // server's default input source
  int default_rate;                // server's default sample rate
} rt_pa_info;
8545
 
8546
// Sample rates advertised for every PulseAudio device (PulseAudio
// resamples internally as needed).  Zero-terminated table.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 192000, 0};
8548
 
8549
// Pairs an RtAudio sample format with its PulseAudio equivalent.
struct rtaudio_pa_format_mapping_t {
  RtAudioFormat rtaudio_format;
  pa_sample_format_t pa_format;
};
8553
 
8554
// Native format mappings (little-endian only); the {0, PA_SAMPLE_INVALID}
// entry is the end-of-table sentinel used by the iteration loops below.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
  {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
  {RTAUDIO_SINT24, PA_SAMPLE_S24LE},
  {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
  {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
  {0, PA_SAMPLE_INVALID}};
8560
 
8561
// Per-stream PulseAudio state stored in stream_.apiHandle.
struct PulseAudioHandle {
  pa_simple *s_play;          // playback connection (NULL when output unused)
  pa_simple *s_rec;           // record connection (NULL when input unused)
  pthread_t thread;           // callback thread; set when probeDeviceOpen creates it
  pthread_cond_t runnable_cv; // wakes a stopped callback thread (see callbackEvent)
  bool runnable;              // guarded by stream_.mutex; true while stream should run
  PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
};
8569
 
8570
// Ask the PulseAudio mainloop to exit with the given return code.
// NOTE(review): assumes rt_pa_mainloop_api was set by collectDeviceInfo()
// before any callback can fire — true for the visible call sites.
static void rt_pa_mainloop_api_quit(int ret) {
    rt_pa_mainloop_api->quit(rt_pa_mainloop_api, ret);
}
8573
 
8574
// pa_context_get_server_info() callback: records the server's default
// sample rate and default sink/source names into rt_pa_info.  A NULL
// info pointer means the query failed, so the mainloop is terminated
// with a nonzero return code.
static void rt_pa_set_server_info(pa_context *context, const pa_server_info *info, void *data){
  (void)context;
  (void)data;
  pa_sample_spec ss;

  if (!info) {
    rt_pa_mainloop_api_quit(1);
    return;
  }

  ss = info->sample_spec;

  rt_pa_info.default_rate = ss.rate;
  rt_pa_info.default_sink_name = info->default_sink_name;
  rt_pa_info.default_source_name = info->default_source_name;
}
8590
 
8591
static void rt_pa_set_sink_info(pa_context * /*c*/, const pa_sink_info *i,
8592
                                int eol, void * /*userdata*/)
8593
{
8594
  if (eol) return;
8595
  PaDeviceInfo inf;
8596
  inf.info.name = pa_proplist_gets(i->proplist, "device.description");
8597
  inf.info.probed = true;
8598
  inf.info.outputChannels = i->sample_spec.channels;
8599
  inf.info.preferredSampleRate = i->sample_spec.rate;
8600
  inf.info.isDefaultOutput = (rt_pa_info.default_sink_name == i->name);
8601
  inf.sink_index = i->index;
8602
  inf.sink_name = i->name;
8603
  for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8604
    inf.info.sampleRates.push_back( *sr );
8605
  for ( const rtaudio_pa_format_mapping_t *fm = supported_sampleformats;
8606
        fm->rtaudio_format; ++fm )
8607
    inf.info.nativeFormats |= fm->rtaudio_format;
8608
  for (size_t i=0; i < rt_pa_info.dev.size(); i++)
8609
  {
8610
    /* Attempt to match up sink and source records by device description. */
8611
    if (rt_pa_info.dev[i].info.name == inf.info.name) {
8612
      rt_pa_info.dev[i].sink_index = inf.sink_index;
8613
      rt_pa_info.dev[i].sink_name = inf.sink_name;
8614
      rt_pa_info.dev[i].info.outputChannels = inf.info.outputChannels;
8615
      rt_pa_info.dev[i].info.isDefaultOutput = inf.info.isDefaultOutput;
8616
      /* Assume duplex channels are minimum of input and output channels. */
8617
      /* Uncomment if we add support for DUPLEX
8618
      if (rt_pa_info.dev[i].source_index > -1)
8619
        (inf.info.outputChannels < rt_pa_info.dev[i].info.inputChannels)
8620
          ? inf.info.outputChannels : rt_pa_info.dev[i].info.inputChannels;
8621
      */
8622
      return;
8623
    }
8624
  }
8625
  /* try to ensure device #0 is the default */
8626
  if (inf.info.isDefaultOutput)
8627
    rt_pa_info.dev.insert(rt_pa_info.dev.begin(), inf);
8628
  else
8629
    rt_pa_info.dev.push_back(inf);
8630
}
8631
 
8632
static void rt_pa_set_source_info_and_quit(pa_context * /*c*/, const pa_source_info *i,
8633
                                           int eol, void * /*userdata*/)
8634
{
8635
  if (eol) {
8636
    rt_pa_mainloop_api_quit(0);
8637
    return;
8638
  }
8639
  PaDeviceInfo inf;
8640
  inf.info.name = pa_proplist_gets(i->proplist, "device.description");
8641
  inf.info.probed = true;
8642
  inf.info.inputChannels = i->sample_spec.channels;
8643
  inf.info.preferredSampleRate = i->sample_spec.rate;
8644
  inf.info.isDefaultInput = (rt_pa_info.default_source_name == i->name);
8645
  inf.source_index = i->index;
8646
  inf.source_name = i->name;
8647
  for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8648
    inf.info.sampleRates.push_back( *sr );
8649
  for ( const rtaudio_pa_format_mapping_t *fm = supported_sampleformats;
8650
        fm->rtaudio_format; ++fm )
8651
    inf.info.nativeFormats |= fm->rtaudio_format;
8652
 
8653
  for (size_t i=0; i < rt_pa_info.dev.size(); i++)
8654
  {
8655
    /* Attempt to match up sink and source records by device description. */
8656
    if (rt_pa_info.dev[i].info.name == inf.info.name) {
8657
      rt_pa_info.dev[i].source_index = inf.source_index;
8658
      rt_pa_info.dev[i].source_name = inf.source_name;
8659
      rt_pa_info.dev[i].info.inputChannels = inf.info.inputChannels;
8660
      rt_pa_info.dev[i].info.isDefaultInput = inf.info.isDefaultInput;
8661
      /* Assume duplex channels are minimum of input and output channels. */
8662
      /* Uncomment if we add support for DUPLEX
8663
      if (rt_pa_info.dev[i].sink_index > -1) {
8664
        rt_pa_info.dev[i].info.duplexChannels =
8665
          (inf.info.inputChannels < rt_pa_info.dev[i].info.outputChannels)
8666
          ? inf.info.inputChannels : rt_pa_info.dev[i].info.outputChannels;
8667
      }
8668
      */
8669
      return;
8670
    }
8671
  }
8672
  /* try to ensure device #0 is the default */
8673
  if (inf.info.isDefaultInput)
8674
    rt_pa_info.dev.insert(rt_pa_info.dev.begin(), inf);
8675
  else
8676
    rt_pa_info.dev.push_back(inf);
8677
}
8678
 
8679
// pa_context state-change callback used by collectDeviceInfo().  When
// the context becomes ready it kicks off the server/sink/source queries
// (the source query terminates the mainloop when its list ends); a
// failed or terminated context quits the mainloop immediately.
static void rt_pa_context_state_callback(pa_context *context, void *userdata) {
  (void)userdata;

  auto state = pa_context_get_state(context);
  switch (state) {
    // Transitional connection states: nothing to do yet.
    case PA_CONTEXT_CONNECTING:
    case PA_CONTEXT_AUTHORIZING:
    case PA_CONTEXT_SETTING_NAME:
      break;

    case PA_CONTEXT_READY:
      rt_pa_info.dev.clear(); // rebuild the device cache from scratch
      pa_context_get_server_info(context, rt_pa_set_server_info, NULL);
      pa_context_get_sink_info_list(context, rt_pa_set_sink_info, NULL);
      pa_context_get_source_info_list(context, rt_pa_set_source_info_and_quit, NULL);
      break;

    case PA_CONTEXT_TERMINATED:
      rt_pa_mainloop_api_quit(0); // clean shutdown
      break;

    case PA_CONTEXT_FAILED:
    default:
      rt_pa_mainloop_api_quit(1); // error: give collectDeviceInfo a nonzero ret
  }
}
8705
 
8706
// Destructor: ensure any still-open stream is shut down before the
// API object disappears.
RtApiPulse::~RtApiPulse()
{
  if ( stream_.state == STREAM_CLOSED ) return;
  closeStream();
}
8711
 
8712
// Rebuild the rt_pa_info device cache by connecting to the PulseAudio
// server and running a temporary mainloop.  All discovery work happens
// in the state callback chain (rt_pa_context_state_callback ->
// server/sink/source callbacks); this function just drives the loop
// and reports failures.  Cleanup is centralized at the 'quit' label.
void RtApiPulse::collectDeviceInfo( void )
{
  pa_context *context = NULL;
  pa_mainloop *m = NULL;
  char *server = NULL; // always NULL here: connect to the default server
  int ret = 1;         // mainloop return code; nonzero means discovery failed

  if (!(m = pa_mainloop_new())) {
    errorStream_ << "RtApiPulse::DeviceInfo pa_mainloop_new() failed.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto quit;
  }

  // Publish the mainloop API so the callbacks can quit the loop.
  rt_pa_mainloop_api = pa_mainloop_get_api(m);

  if (!(context = pa_context_new_with_proplist(rt_pa_mainloop_api, NULL, NULL))) {
    errorStream_ << "pa_context_new() failed.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto quit;
  }

  pa_context_set_state_callback(context, rt_pa_context_state_callback, NULL);

  if (pa_context_connect(context, server, PA_CONTEXT_NOFLAGS, NULL) < 0) {
    errorStream_ << "RtApiPulse::DeviceInfo pa_context_connect() failed: "
      << pa_strerror(pa_context_errno(context));
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto quit;
  }

  // Blocks until a callback calls rt_pa_mainloop_api_quit().
  if (pa_mainloop_run(m, &ret) < 0) {
    errorStream_ << "pa_mainloop_run() failed.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto quit;
  }

  if (ret != 0) {
    errorStream_ << "could not get server info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    goto quit;
  }

quit:
  if (context)
    pa_context_unref(context);

  if (m) {
    pa_mainloop_free(m);
  }

  // No-op today (server stays NULL), kept for symmetry with the
  // PulseAudio examples this code derives from.
  pa_xfree(server);
}
8769
 
8770
unsigned int RtApiPulse::getDeviceCount( void )
8771
{
8772
  collectDeviceInfo();
8773
  return rt_pa_info.dev.size();
8774
}
8775
 
8776
// Return the cached record for the given device index, populating the
// cache on first use.  An out-of-range index yields a default-constructed
// (unprobed) DeviceInfo, matching the other backends' behavior.
RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int device )
{
  if ( rt_pa_info.dev.empty() )
    collectDeviceInfo();
  if ( device >= rt_pa_info.dev.size() )
    return RtAudio::DeviceInfo();
  return rt_pa_info.dev[device].info;
}
8784
 
8785
// Thread entry point for the PulseAudio callback loop; mirrors
// alsaCallbackHandler above.  Services one buffer per iteration via
// callbackEvent() until isRunning is cleared or the thread is cancelled
// at the pthread_testcancel() point.
static void *pulseaudio_callback( void * user )
{
  CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
  RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
  volatile bool *isRunning = &cbi->isRunning;

#ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
  // Report whether requested realtime scheduling actually took effect.
  if (cbi->doRealtime) {
    std::cerr << "RtAudio pulse: " <<
             (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
             "running realtime scheduling" << std::endl;
  }
#endif

  while ( *isRunning ) {
    pthread_testcancel(); // explicit cancellation point
    context->callbackEvent();
  }

  pthread_exit( NULL );
}
8806
 
8807
void RtApiPulse::closeStream( void )
8808
{
8809
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8810
 
8811
  stream_.callbackInfo.isRunning = false;
8812
  if ( pah ) {
8813
    MUTEX_LOCK( &stream_.mutex );
8814
    if ( stream_.state == STREAM_STOPPED ) {
8815
      pah->runnable = true;
8816
      pthread_cond_signal( &pah->runnable_cv );
8817
    }
8818
    MUTEX_UNLOCK( &stream_.mutex );
8819
 
8820
    pthread_join( pah->thread, 0 );
8821
    if ( pah->s_play ) {
8822
      pa_simple_flush( pah->s_play, NULL );
8823
      pa_simple_free( pah->s_play );
8824
    }
8825
    if ( pah->s_rec )
8826
      pa_simple_free( pah->s_rec );
8827
 
8828
    pthread_cond_destroy( &pah->runnable_cv );
8829
    delete pah;
8830
    stream_.apiHandle = 0;
8831
  }
8832
 
8833
  if ( stream_.userBuffer[0] ) {
8834
    free( stream_.userBuffer[0] );
8835
    stream_.userBuffer[0] = 0;
8836
  }
8837
  if ( stream_.userBuffer[1] ) {
8838
    free( stream_.userBuffer[1] );
8839
    stream_.userBuffer[1] = 0;
8840
  }
8841
 
8842
  stream_.state = STREAM_CLOSED;
8843
  stream_.mode = UNINITIALIZED;
8844
}
8845
 
8846
// One iteration of the callback thread: wait while stopped, invoke the
// user callback, then push/pull one buffer to/from the PulseAudio
// simple-API connections (with optional format/channel conversion).
// NOTE: the user callback runs OUTSIDE the stream mutex; only the
// device I/O and buffer conversion below are serialized against
// start/stop/close.
void RtApiPulse::callbackEvent( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_STOPPED ) {
    // Park on the condition variable until startStream() (or
    // closeStream()) sets runnable and signals us.
    MUTEX_LOCK( &stream_.mutex );
    while ( !pah->runnable )
      pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );

    // Woken for shutdown rather than start: just return and let the
    // thread loop observe isRunning.
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
      "this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Invoke the user callback; its return value selects continue (0),
  // drain-and-stop (1) or abort (2).
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
                               stream_.bufferSize, streamTime, status,
                               stream_.callbackInfo.userData );

  if ( doStopStream == 2 ) {
    abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );
  // When conversion is enabled the device talks to deviceBuffer and the
  // user talks to userBuffer; otherwise they share the user buffer.
  void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
  void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

  if ( stream_.state != STREAM_RUNNING )
    goto unlock;

  int pa_error;
  size_t bytes;
  if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( stream_.doConvertBuffer[OUTPUT] ) {
        // Convert user data into the device format/layout first.
        convertBuffer( stream_.deviceBuffer,
                       stream_.userBuffer[OUTPUT],
                       stream_.convertInfo[OUTPUT] );
        bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
                formatBytes( stream_.deviceFormat[OUTPUT] );
    } else
        bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
                formatBytes( stream_.userFormat );

    // Blocking write of one full buffer to the playback connection.
    if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
    if ( stream_.doConvertBuffer[INPUT] )
      bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.deviceFormat[INPUT] );
    else
      bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
        formatBytes( stream_.userFormat );

    // Blocking read of one full buffer from the record connection.
    if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
      errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
        pa_strerror( pa_error ) << ".";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );
    }
    if ( stream_.doConvertBuffer[INPUT] ) {
      // Convert captured device data into the user's format/layout.
      convertBuffer( stream_.userBuffer[INPUT],
                     stream_.deviceBuffer,
                     stream_.convertInfo[INPUT] );
    }
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );
  RtApi::tickStreamTime();

  if ( doStopStream == 1 )
    stopStream();
}
8937
 
8938
// Transition the stream to RUNNING and wake the callback thread, which
// is parked on runnable_cv while the stream is stopped (see
// callbackEvent).  Reports INVALID_USE on a closed stream and a WARNING
// if already running.
void RtApiPulse::startStream( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::startStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiPulse::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // Reset the stream-time baseline so getStreamTime() restarts cleanly.
  #if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
  #endif

  stream_.state = STREAM_RUNNING;

  // Release the callback thread blocked in callbackEvent().
  pah->runnable = true;
  pthread_cond_signal( &pah->runnable_cv );
  MUTEX_UNLOCK( &stream_.mutex );
}
8965
 
8966
// Stop the stream after DRAINING any audio still queued for playback
// (contrast abortStream, which discards it).  On drain failure the
// mutex is released before raising SYSTEM_ERROR, since error() may
// throw.
void RtApiPulse::stopStream( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // State is flipped before taking the mutex so the callback thread's
  // next iteration parks itself; it is set again below under the lock.
  // NOTE(review): the pre-lock store looks redundant with the one at
  // the end — kept as-is to preserve the existing thread interleaving.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  if ( pah ) {
    pah->runnable = false;
    if ( pah->s_play ) {
      int pa_error;
      // Block until all queued playback data has been played out.
      if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
        errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
          pa_strerror( pa_error ) << ".";
        errorText_ = errorStream_.str();
        MUTEX_UNLOCK( &stream_.mutex );
        error( RtAudioError::SYSTEM_ERROR );
        return;
      }
    }
  }

  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );
}
9002
 
9003
// Stop the stream immediately, FLUSHING (discarding) any audio still
// queued for playback (contrast stopStream, which drains it).  On
// flush failure the mutex is released before raising SYSTEM_ERROR,
// since error() may throw.
void RtApiPulse::abortStream( void )
{
  PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
    error( RtAudioError::INVALID_USE );
    return;
  }
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  // State is flipped before taking the mutex so the callback thread's
  // next iteration parks itself; it is set again below under the lock.
  // NOTE(review): the pre-lock store looks redundant with the one at
  // the end — kept as-is to preserve the existing thread interleaving.
  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  if ( pah ) {
    pah->runnable = false;
    if ( pah->s_play ) {
      int pa_error;
      // Discard queued playback data without waiting.
      if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
        errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
          pa_strerror( pa_error ) << ".";
        errorText_ = errorStream_.str();
        MUTEX_UNLOCK( &stream_.mutex );
        error( RtAudioError::SYSTEM_ERROR );
        return;
      }
    }
  }

  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );
}
9039
 
9040
// Open one direction (INPUT or OUTPUT) of a PulseAudio stream.
//
// Validates the device/channel request against the rt_pa_info cache,
// chooses sample rate and format (falling back to FLOAT32 with internal
// conversion for unsupported formats), allocates the user/device
// buffers, creates the pa_simple connection, and — on the first open —
// the shared PulseAudioHandle and the callback thread.  DUPLEX is not
// supported (the simple API cannot synchronize two streams).
// Returns SUCCESS or FAILURE; all failure paths funnel through the
// 'error' label, which releases everything allocated so far.
bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
                                  unsigned int channels, unsigned int firstChannel,
                                  unsigned int sampleRate, RtAudioFormat format,
                                  unsigned int *bufferSize, RtAudio::StreamOptions *options )
{
  PulseAudioHandle *pah = 0;
  unsigned long bufferBytes = 0;
  pa_sample_spec ss;

  if ( device >= rt_pa_info.dev.size() ) return false;
  if ( firstChannel != 0 ) {
    errorText_ = "PulseAudio does not support channel offset mapping.";
    return false;
  }

  /* these may be NULL for default, but we've already got the names */
  const char *dev_input = NULL;
  const char *dev_output = NULL;
  if (!rt_pa_info.dev[device].source_name.empty())
    dev_input = rt_pa_info.dev[device].source_name.c_str();
  if (!rt_pa_info.dev[device].sink_name.empty())
    dev_output = rt_pa_info.dev[device].sink_name.c_str();

  // Direction/channel-count capability checks against the cache.
  if (mode==INPUT && rt_pa_info.dev[device].info.inputChannels == 0) {
    errorText_ = "PulseAudio device does not support input.";
    return false;
  }
  if (mode==OUTPUT && rt_pa_info.dev[device].info.outputChannels == 0) {
    errorText_ = "PulseAudio device does not support output.";
    return false;
  }
  if (mode==DUPLEX && rt_pa_info.dev[device].info.duplexChannels == 0) {
    /* Note: will always error, DUPLEX not yet supported */
    errorText_ = "PulseAudio device does not support duplex.";
    return false;
  }

  if (mode==INPUT && rt_pa_info.dev[device].info.inputChannels < channels) {
    errorText_ = "PulseAudio: unsupported number of input channels.";
    return false;
  }

  if (mode==OUTPUT && rt_pa_info.dev[device].info.outputChannels < channels) {
    errorText_ = "PulseAudio: unsupported number of output channels.";
    return false;
  }

  if (mode==DUPLEX && rt_pa_info.dev[device].info.duplexChannels < channels) {
    /* Note: will always error, DUPLEX not yet supported */
    errorText_ = "PulseAudio: unsupported number of duplex channels.";
    return false;
  }

  ss.channels = channels;

  // Accept the requested rate either way; the table check is advisory
  // (PulseAudio resamples internally).
  bool sr_found = false;
  for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
    if ( sampleRate == *sr ) {
      sr_found = true;
      stream_.sampleRate = sampleRate;
      ss.rate = sampleRate;
      break;
    }
  }
  if ( !sr_found ) {
    stream_.sampleRate = sampleRate;
    ss.rate = sampleRate;
  }

  // Pick a native PulseAudio format, or fall back to FLOAT32 with
  // internal conversion when the requested format has no mapping.
  bool sf_found = 0;
  for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
        sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
    if ( format == sf->rtaudio_format ) {
      sf_found = true;
      stream_.userFormat = sf->rtaudio_format;
      stream_.deviceFormat[mode] = stream_.userFormat;
      ss.format = sf->pa_format;
      break;
    }
  }
  if ( !sf_found ) { // Use internal data format conversion.
    stream_.userFormat = format;
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
    ss.format = PA_SAMPLE_FLOAT32LE;
  }

  // Set other stream parameters.
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;
  stream_.nBuffers = options ? options->numberOfBuffers : 1;
  stream_.doByteSwap[mode] = false;
  stream_.nUserChannels[mode] = channels;
  stream_.nDeviceChannels[mode] = channels + firstChannel;
  stream_.channelOffset[mode] = 0;
  std::string streamName = "RtAudio";

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] )
    stream_.doConvertBuffer[mode] = true;

  // Allocate necessary internal buffers.
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
    goto error;
  }
  stream_.bufferSize = *bufferSize;

  if ( stream_.doConvertBuffer[mode] ) {

    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      // Reuse an existing (output) device buffer when it is already
      // large enough for the input side.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;
      }
    }

    if ( makeBuffer ) {
      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
        goto error;
      }
    }
  }

  stream_.device[mode] = device;

  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

  // First open for this stream: create the shared handle + cond var.
  // (The inner 'pah' intentionally shadows the outer one; the outer is
  // (re)assigned from stream_.apiHandle just below.)
  if ( !stream_.apiHandle ) {
    PulseAudioHandle *pah = new PulseAudioHandle;
    if ( !pah ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
      goto error;
    }

    stream_.apiHandle = pah;
    if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
      goto error;
    }
  }
  pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

  // NB: this local shadows the RtApi::error() member within the switch;
  // it receives the pa_simple_new() error code.
  int error;
  if ( options && !options->streamName.empty() ) streamName = options->streamName;
  switch ( mode ) {
    // Declared at switch scope so both INPUT and OUTPUT cases can use it.
    pa_buffer_attr buffer_attr;
  case INPUT:
    buffer_attr.fragsize = bufferBytes;
    buffer_attr.maxlength = -1;

    pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD,
                                dev_input, "Record", &ss, NULL, &buffer_attr, &error );
    if ( !pah->s_rec ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
      goto error;
    }
    break;
  case OUTPUT: {
    pa_buffer_attr * attr_ptr;

    if ( options && options->numberOfBuffers > 0 ) {
      // pa_buffer_attr::fragsize is recording-only.
      // Hopefully PortAudio won't access uninitialized fields.
      buffer_attr.maxlength = bufferBytes * options->numberOfBuffers;
      buffer_attr.minreq = -1;
      buffer_attr.prebuf = -1;
      buffer_attr.tlength = -1;
      attr_ptr = &buffer_attr;
    } else {
      attr_ptr = nullptr; // let the server choose buffering defaults
    }

    pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK,
                                 dev_output, "Playback", &ss, NULL, attr_ptr, &error );
    if ( !pah->s_play ) {
      errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
      goto error;
    }
    break;
  }
  case DUPLEX:
    /* Note: We could add DUPLEX by synchronizing multiple streams,
       but it would mean moving from Simple API to Asynchronous API:
       https://freedesktop.org/software/pulseaudio/doxygen/streams.html#sync_streams */
    errorText_ = "RtApiPulse::probeDeviceOpen: duplex not supported for PulseAudio.";
    goto error;
  default:
    goto error;
  }

  // Second open of the opposite direction promotes the stream to DUPLEX;
  // re-opening the same direction is an error.
  if ( stream_.mode == UNINITIALIZED )
    stream_.mode = mode;
  else if ( stream_.mode == mode )
    goto error;
  else
    stream_.mode = DUPLEX;

  if ( !stream_.callbackInfo.isRunning ) {
    stream_.callbackInfo.object = this;

    stream_.state = STREAM_STOPPED;
    // Set the thread attributes for joinable and realtime scheduling
    // priority (optional).  The higher priority will only take affect
    // if the program is run as root or suid. Note, under Linux
    // processes with CAP_SYS_NICE privilege, a user can change
    // scheduling policy and priority (thus need not be root). See
    // POSIX "capabilities".
    pthread_attr_t attr;
    pthread_attr_init( &attr );
    pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
#ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
    if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
      stream_.callbackInfo.doRealtime = true;
      struct sched_param param;
      int priority = options->priority;
      int min = sched_get_priority_min( SCHED_RR );
      int max = sched_get_priority_max( SCHED_RR );
      if ( priority < min ) priority = min;
      else if ( priority > max ) priority = max;
      param.sched_priority = priority;

      // Set the policy BEFORE the priority. Otherwise it fails.
      pthread_attr_setschedpolicy(&attr, SCHED_RR);
      pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
      // This is definitely required. Otherwise it fails.
      pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
      pthread_attr_setschedparam(&attr, &param);
    }
    else
      pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
#else
    pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
#endif

    stream_.callbackInfo.isRunning = true;
    int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
    pthread_attr_destroy(&attr);
    if(result != 0) {
      // Failed. Try instead with default attributes.
      result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
      if(result != 0) {
        stream_.callbackInfo.isRunning = false;
        errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
        goto error;
      }
    }
  }

  return SUCCESS;

 error:
  // Unified failure path: tear down everything allocated above.
  // NOTE(review): the isRunning guard here looks inverted (the handle
  // is only deleted when the callback thread was already marked
  // running) — behavior kept as-is; confirm against upstream RtAudio.
  if ( pah && stream_.callbackInfo.isRunning ) {
    pthread_cond_destroy( &pah->runnable_cv );
    delete pah;
    stream_.apiHandle = 0;
  }

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.state = STREAM_CLOSED;
  return FAILURE;
}
9327
 
9328
//******************** End of __LINUX_PULSE__ *********************//
9329
#endif
9330
 
9331
#if defined(__LINUX_OSS__)
9332
 
9333
#include <unistd.h>
9334
#include <sys/ioctl.h>
9335
#include <unistd.h>
9336
#include <fcntl.h>
9337
#include <sys/soundcard.h>
9338
#include <errno.h>
9339
#include <math.h>
9340
 
9341
static void *ossCallbackHandler(void * ptr);
9342
 
9343
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];    // device ids (file descriptors) per direction
  bool xrun[2]; // over/underrun indicator per direction
  bool triggered;          // whether duplex trigger has been fired
  pthread_cond_t runnable; // wakes the callback thread from a stopped stream

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
9354
 
9355
// Default constructor: all stream state is initialized lazily when a
// device is opened.
RtApiOss :: RtApiOss()
{
  // Nothing to do here.
}
9359
 
9360
// Destructor: ensure any still-open stream is shut down before the
// API object disappears.
RtApiOss :: ~RtApiOss()
{
  if ( stream_.state == STREAM_CLOSED ) return;
  closeStream();
}
9364
 
9365
unsigned int RtApiOss :: getDeviceCount( void )
9366
{
9367
  int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9368
  if ( mixerfd == -1 ) {
9369
    errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9370
    error( RtAudioError::WARNING );
9371
    return 0;
9372
  }
9373
 
9374
  oss_sysinfo sysinfo;
9375
  if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9376
    close( mixerfd );
9377
    errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9378
    error( RtAudioError::WARNING );
9379
    return 0;
9380
  }
9381
 
9382
  close( mixerfd );
9383
  return sysinfo.numaudios;
9384
}
9385
 
9386
// Probe a single OSS device (by index) and fill in an RtAudio::DeviceInfo:
// channel counts, native sample formats, and supported sample rates.
// On any failure the returned info has probed == false and a WARNING (or
// INVALID_USE for bad arguments) is raised.
RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // All probing goes through the mixer device's OSSv4 ioctls.
  int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
  if ( mixerfd == -1 ) {
    errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
    error( RtAudioError::WARNING );
    return info;
  }

  oss_sysinfo sysinfo;
  int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
  if ( result == -1 ) {
    close( mixerfd );
    errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
    error( RtAudioError::WARNING );
    return info;
  }

  unsigned nDevices = sysinfo.numaudios;
  if ( nDevices == 0 ) {
    close( mixerfd );
    errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    close( mixerfd );
    errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  // Fetch per-device capabilities; the mixer fd is no longer needed after this.
  oss_audioinfo ainfo;
  ainfo.dev = device;
  result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
  close( mixerfd );
  if ( result == -1 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Probe channels
  if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
  if ( ainfo.caps & PCM_CAP_DUPLEX ) {
    // NOTE(review): the inner PCM_CAP_DUPLEX test is redundant (already
    // checked by the enclosing if) but harmless; kept as-is.
    if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
      info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
  }

  // Probe data formats ... do for input
  // (iformats is used; OSS reports input and output format masks separately.)
  unsigned long mask = ainfo.iformats;
  if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
    info.nativeFormats |= RTAUDIO_SINT16;
  if ( mask & AFMT_S8 )
    info.nativeFormats |= RTAUDIO_SINT8;
  if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
    info.nativeFormats |= RTAUDIO_SINT32;
#ifdef AFMT_FLOAT
  if ( mask & AFMT_FLOAT )
    info.nativeFormats |= RTAUDIO_FLOAT32;
#endif
  if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
    info.nativeFormats |= RTAUDIO_SINT24;

  // Check that we have at least one supported format
  if ( info.nativeFormats == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Probe the supported sample rates.
  info.sampleRates.clear();
  if ( ainfo.nrates ) {
    // The device enumerates discrete rates: keep those that match RtAudio's
    // SAMPLE_RATES table, preferring the highest rate <= 48 kHz.
    for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
      for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
        if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
          info.sampleRates.push_back( SAMPLE_RATES[k] );

          if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
            info.preferredSampleRate = SAMPLE_RATES[k];

          break;
        }
      }
    }
  }
  else {
    // Check min and max rate values;
    // the device reports a continuous rate range instead of a discrete list.
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );

        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];
      }
    }
  }

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  }
  else {
    // Only mark the probe successful once at least one usable rate exists.
    info.probed = true;
    info.name = ainfo.name;
  }

  return info;
}
9504
 
9505
 
9506
bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9507
                                  unsigned int firstChannel, unsigned int sampleRate,
9508
                                  RtAudioFormat format, unsigned int *bufferSize,
9509
                                  RtAudio::StreamOptions *options )
9510
{
9511
  int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9512
  if ( mixerfd == -1 ) {
9513
    errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9514
    return FAILURE;
9515
  }
9516
 
9517
  oss_sysinfo sysinfo;
9518
  int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9519
  if ( result == -1 ) {
9520
    close( mixerfd );
9521
    errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9522
    return FAILURE;
9523
  }
9524
 
9525
  unsigned nDevices = sysinfo.numaudios;
9526
  if ( nDevices == 0 ) {
9527
    // This should not happen because a check is made before this function is called.
9528
    close( mixerfd );
9529
    errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9530
    return FAILURE;
9531
  }
9532
 
9533
  if ( device >= nDevices ) {
9534
    // This should not happen because a check is made before this function is called.
9535
    close( mixerfd );
9536
    errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9537
    return FAILURE;
9538
  }
9539
 
9540
  oss_audioinfo ainfo;
9541
  ainfo.dev = device;
9542
  result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9543
  close( mixerfd );
9544
  if ( result == -1 ) {
9545
    errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9546
    errorText_ = errorStream_.str();
9547
    return FAILURE;
9548
  }
9549
 
9550
  // Check if device supports input or output
9551
  if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9552
       ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9553
    if ( mode == OUTPUT )
9554
      errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9555
    else
9556
      errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9557
    errorText_ = errorStream_.str();
9558
    return FAILURE;
9559
  }
9560
 
9561
  int flags = 0;
9562
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
9563
  if ( mode == OUTPUT )
9564
    flags |= O_WRONLY;
9565
  else { // mode == INPUT
9566
    if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9567
      // We just set the same device for playback ... close and reopen for duplex (OSS only).
9568
      close( handle->id[0] );
9569
      handle->id[0] = 0;
9570
      if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9571
        errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9572
        errorText_ = errorStream_.str();
9573
        return FAILURE;
9574
      }
9575
      // Check that the number previously set channels is the same.
9576
      if ( stream_.nUserChannels[0] != channels ) {
9577
        errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9578
        errorText_ = errorStream_.str();
9579
        return FAILURE;
9580
      }
9581
      flags |= O_RDWR;
9582
    }
9583
    else
9584
      flags |= O_RDONLY;
9585
  }
9586
 
9587
  // Set exclusive access if specified.
9588
  if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9589
 
9590
  // Try to open the device.
9591
  int fd;
9592
  fd = open( ainfo.devnode, flags, 0 );
9593
  if ( fd == -1 ) {
9594
    if ( errno == EBUSY )
9595
      errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9596
    else
9597
      errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9598
    errorText_ = errorStream_.str();
9599
    return FAILURE;
9600
  }
9601
 
9602
  // For duplex operation, specifically set this mode (this doesn't seem to work).
9603
  /*
9604
    if ( flags | O_RDWR ) {
9605
    result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9606
    if ( result == -1) {
9607
    errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9608
    errorText_ = errorStream_.str();
9609
    return FAILURE;
9610
    }
9611
    }
9612
  */
9613
 
9614
  // Check the device channel support.
9615
  stream_.nUserChannels[mode] = channels;
9616
  if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9617
    close( fd );
9618
    errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9619
    errorText_ = errorStream_.str();
9620
    return FAILURE;
9621
  }
9622
 
9623
  // Set the number of channels.
9624
  int deviceChannels = channels + firstChannel;
9625
  result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9626
  if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9627
    close( fd );
9628
    errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9629
    errorText_ = errorStream_.str();
9630
    return FAILURE;
9631
  }
9632
  stream_.nDeviceChannels[mode] = deviceChannels;
9633
 
9634
  // Get the data format mask
9635
  int mask;
9636
  result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9637
  if ( result == -1 ) {
9638
    close( fd );
9639
    errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9640
    errorText_ = errorStream_.str();
9641
    return FAILURE;
9642
  }
9643
 
9644
  // Determine how to set the device format.
9645
  stream_.userFormat = format;
9646
  int deviceFormat = -1;
9647
  stream_.doByteSwap[mode] = false;
9648
  if ( format == RTAUDIO_SINT8 ) {
9649
    if ( mask & AFMT_S8 ) {
9650
      deviceFormat = AFMT_S8;
9651
      stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9652
    }
9653
  }
9654
  else if ( format == RTAUDIO_SINT16 ) {
9655
    if ( mask & AFMT_S16_NE ) {
9656
      deviceFormat = AFMT_S16_NE;
9657
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9658
    }
9659
    else if ( mask & AFMT_S16_OE ) {
9660
      deviceFormat = AFMT_S16_OE;
9661
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9662
      stream_.doByteSwap[mode] = true;
9663
    }
9664
  }
9665
  else if ( format == RTAUDIO_SINT24 ) {
9666
    if ( mask & AFMT_S24_NE ) {
9667
      deviceFormat = AFMT_S24_NE;
9668
      stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9669
    }
9670
    else if ( mask & AFMT_S24_OE ) {
9671
      deviceFormat = AFMT_S24_OE;
9672
      stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9673
      stream_.doByteSwap[mode] = true;
9674
    }
9675
  }
9676
  else if ( format == RTAUDIO_SINT32 ) {
9677
    if ( mask & AFMT_S32_NE ) {
9678
      deviceFormat = AFMT_S32_NE;
9679
      stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9680
    }
9681
    else if ( mask & AFMT_S32_OE ) {
9682
      deviceFormat = AFMT_S32_OE;
9683
      stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9684
      stream_.doByteSwap[mode] = true;
9685
    }
9686
  }
9687
 
9688
  if ( deviceFormat == -1 ) {
9689
    // The user requested format is not natively supported by the device.
9690
    if ( mask & AFMT_S16_NE ) {
9691
      deviceFormat = AFMT_S16_NE;
9692
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9693
    }
9694
    else if ( mask & AFMT_S32_NE ) {
9695
      deviceFormat = AFMT_S32_NE;
9696
      stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9697
    }
9698
    else if ( mask & AFMT_S24_NE ) {
9699
      deviceFormat = AFMT_S24_NE;
9700
      stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9701
    }
9702
    else if ( mask & AFMT_S16_OE ) {
9703
      deviceFormat = AFMT_S16_OE;
9704
      stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9705
      stream_.doByteSwap[mode] = true;
9706
    }
9707
    else if ( mask & AFMT_S32_OE ) {
9708
      deviceFormat = AFMT_S32_OE;
9709
      stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9710
      stream_.doByteSwap[mode] = true;
9711
    }
9712
    else if ( mask & AFMT_S24_OE ) {
9713
      deviceFormat = AFMT_S24_OE;
9714
      stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9715
      stream_.doByteSwap[mode] = true;
9716
    }
9717
    else if ( mask & AFMT_S8) {
9718
      deviceFormat = AFMT_S8;
9719
      stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9720
    }
9721
  }
9722
 
9723
  if ( stream_.deviceFormat[mode] == 0 ) {
9724
    // This really shouldn't happen ...
9725
    close( fd );
9726
    errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9727
    errorText_ = errorStream_.str();
9728
    return FAILURE;
9729
  }
9730
 
9731
  // Set the data format.
9732
  int temp = deviceFormat;
9733
  result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9734
  if ( result == -1 || deviceFormat != temp ) {
9735
    close( fd );
9736
    errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9737
    errorText_ = errorStream_.str();
9738
    return FAILURE;
9739
  }
9740
 
9741
  // Attempt to set the buffer size.  According to OSS, the minimum
9742
  // number of buffers is two.  The supposed minimum buffer size is 16
9743
  // bytes, so that will be our lower bound.  The argument to this
9744
  // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9745
  // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9746
  // We'll check the actual value used near the end of the setup
9747
  // procedure.
9748
  int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9749
  if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9750
  int buffers = 0;
9751
  if ( options ) buffers = options->numberOfBuffers;
9752
  if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9753
  if ( buffers < 2 ) buffers = 3;
9754
  temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9755
  result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9756
  if ( result == -1 ) {
9757
    close( fd );
9758
    errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9759
    errorText_ = errorStream_.str();
9760
    return FAILURE;
9761
  }
9762
  stream_.nBuffers = buffers;
9763
 
9764
  // Save buffer size (in sample frames).
9765
  *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9766
  stream_.bufferSize = *bufferSize;
9767
 
9768
  // Set the sample rate.
9769
  int srate = sampleRate;
9770
  result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9771
  if ( result == -1 ) {
9772
    close( fd );
9773
    errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9774
    errorText_ = errorStream_.str();
9775
    return FAILURE;
9776
  }
9777
 
9778
  // Verify the sample rate setup worked.
9779
  if ( abs( srate - (int)sampleRate ) > 100 ) {
9780
    close( fd );
9781
    errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9782
    errorText_ = errorStream_.str();
9783
    return FAILURE;
9784
  }
9785
  stream_.sampleRate = sampleRate;
9786
 
9787
  if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9788
    // We're doing duplex setup here.
9789
    stream_.deviceFormat[0] = stream_.deviceFormat[1];
9790
    stream_.nDeviceChannels[0] = deviceChannels;
9791
  }
9792
 
9793
  // Set interleaving parameters.
9794
  stream_.userInterleaved = true;
9795
  stream_.deviceInterleaved[mode] =  true;
9796
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9797
    stream_.userInterleaved = false;
9798
 
9799
  // Set flags for buffer conversion
9800
  stream_.doConvertBuffer[mode] = false;
9801
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
9802
    stream_.doConvertBuffer[mode] = true;
9803
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9804
    stream_.doConvertBuffer[mode] = true;
9805
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9806
       stream_.nUserChannels[mode] > 1 )
9807
    stream_.doConvertBuffer[mode] = true;
9808
 
9809
  // Allocate the stream handles if necessary and then save.
9810
  if ( stream_.apiHandle == 0 ) {
9811
    try {
9812
      handle = new OssHandle;
9813
    }
9814
    catch ( std::bad_alloc& ) {
9815
      errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9816
      goto error;
9817
    }
9818
 
9819
    if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9820
      errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9821
      goto error;
9822
    }
9823
 
9824
    stream_.apiHandle = (void *) handle;
9825
  }
9826
  else {
9827
    handle = (OssHandle *) stream_.apiHandle;
9828
  }
9829
  handle->id[mode] = fd;
9830
 
9831
  // Allocate necessary internal buffers.
9832
  unsigned long bufferBytes;
9833
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9834
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9835
  if ( stream_.userBuffer[mode] == NULL ) {
9836
    errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9837
    goto error;
9838
  }
9839
 
9840
  if ( stream_.doConvertBuffer[mode] ) {
9841
 
9842
    bool makeBuffer = true;
9843
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9844
    if ( mode == INPUT ) {
9845
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9846
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9847
        if ( bufferBytes <= bytesOut ) makeBuffer = false;
9848
      }
9849
    }
9850
 
9851
    if ( makeBuffer ) {
9852
      bufferBytes *= *bufferSize;
9853
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9854
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9855
      if ( stream_.deviceBuffer == NULL ) {
9856
        errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9857
        goto error;
9858
      }
9859
    }
9860
  }
9861
 
9862
  stream_.device[mode] = device;
9863
  stream_.state = STREAM_STOPPED;
9864
 
9865
  // Setup the buffer conversion information structure.
9866
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9867
 
9868
  // Setup thread if necessary.
9869
  if ( stream_.mode == OUTPUT && mode == INPUT ) {
9870
    // We had already set up an output stream.
9871
    stream_.mode = DUPLEX;
9872
    if ( stream_.device[0] == device ) handle->id[0] = fd;
9873
  }
9874
  else {
9875
    stream_.mode = mode;
9876
 
9877
    // Setup callback thread.
9878
    stream_.callbackInfo.object = (void *) this;
9879
 
9880
    // Set the thread attributes for joinable and realtime scheduling
9881
    // priority.  The higher priority will only take affect if the
9882
    // program is run as root or suid.
9883
    pthread_attr_t attr;
9884
    pthread_attr_init( &attr );
9885
    pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9886
#ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9887
    if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9888
      stream_.callbackInfo.doRealtime = true;
9889
      struct sched_param param;
9890
      int priority = options->priority;
9891
      int min = sched_get_priority_min( SCHED_RR );
9892
      int max = sched_get_priority_max( SCHED_RR );
9893
      if ( priority < min ) priority = min;
9894
      else if ( priority > max ) priority = max;
9895
      param.sched_priority = priority;
9896
 
9897
      // Set the policy BEFORE the priority. Otherwise it fails.
9898
      pthread_attr_setschedpolicy(&attr, SCHED_RR);
9899
      pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9900
      // This is definitely required. Otherwise it fails.
9901
      pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9902
      pthread_attr_setschedparam(&attr, &param);
9903
    }
9904
    else
9905
      pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9906
#else
9907
    pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9908
#endif
9909
 
9910
    stream_.callbackInfo.isRunning = true;
9911
    result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9912
    pthread_attr_destroy( &attr );
9913
    if ( result ) {
9914
      // Failed. Try instead with default attributes.
9915
      result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9916
      if ( result ) {
9917
        stream_.callbackInfo.isRunning = false;
9918
        errorText_ = "RtApiOss::error creating callback thread!";
9919
        goto error;
9920
      }
9921
    }
9922
  }
9923
 
9924
  return SUCCESS;
9925
 
9926
 error:
9927
  if ( handle ) {
9928
    pthread_cond_destroy( &handle->runnable );
9929
    if ( handle->id[0] ) close( handle->id[0] );
9930
    if ( handle->id[1] ) close( handle->id[1] );
9931
    delete handle;
9932
    stream_.apiHandle = 0;
9933
  }
9934
 
9935
  for ( int i=0; i<2; i++ ) {
9936
    if ( stream_.userBuffer[i] ) {
9937
      free( stream_.userBuffer[i] );
9938
      stream_.userBuffer[i] = 0;
9939
    }
9940
  }
9941
 
9942
  if ( stream_.deviceBuffer ) {
9943
    free( stream_.deviceBuffer );
9944
    stream_.deviceBuffer = 0;
9945
  }
9946
 
9947
  stream_.state = STREAM_CLOSED;
9948
  return FAILURE;
9949
}
9950
 
9951
// Close the stream: stop the callback thread, halt the device(s), close
// their descriptors, free the API handle and all internal buffers, and
// mark the stream CLOSED.  Warns (and returns) if no stream is open.
void RtApiOss :: closeStream()
{
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
    return;
  }

  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  // Ask the callback loop to exit; if it is blocked on the runnable
  // condition (stream stopped), wake it so the join below can complete.
  stream_.callbackInfo.isRunning = false;
  MUTEX_LOCK( &stream_.mutex );
  if ( stream_.state == STREAM_STOPPED )
    pthread_cond_signal( &handle->runnable );
  MUTEX_UNLOCK( &stream_.mutex );
  pthread_join( stream_.callbackInfo.thread, NULL );

  // Halt any device still streaming ([0] = playback, [1] = capture).
  if ( stream_.state == STREAM_RUNNING ) {
    if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
      ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    else
      ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    stream_.state = STREAM_STOPPED;
  }

  if ( handle ) {
    pthread_cond_destroy( &handle->runnable );
    // An id of 0 means "never opened" (see probeDeviceOpen).
    if ( handle->id[0] ) close( handle->id[0] );
    if ( handle->id[1] ) close( handle->id[1] );
    delete handle;
    stream_.apiHandle = 0;
  }

  // Release the user buffers for both directions.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
    }
  }

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  }

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
}
9998
 
9999
// Start a stopped stream.  OSS begins streaming automatically as soon as
// samples are read/written, so this only flips the state to RUNNING and
// wakes the callback thread; no device ioctl is required.
void RtApiOss :: startStream()
{
  verifyStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiOss::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  #if defined( HAVE_GETTIMEOFDAY )
  // Reset the stream-time reference point.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
  #endif

  stream_.state = STREAM_RUNNING;

  // No need to do anything else here ... OSS automatically starts
  // when fed samples.

  MUTEX_UNLOCK( &stream_.mutex );

  // Wake the callback thread, which waits on this condition while stopped.
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  pthread_cond_signal( &handle->runnable );
}
10024
 
10025
// Stop the stream, letting queued output drain: writes nBuffers+1 buffers
// of silence before halting the device(s).  Compare abortStream(), which
// halts immediately and discards pending data.
void RtApiOss :: stopStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_UNLOCK( &stream_.mutex );
    return;
  }

  int result = 0;
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Flush the output with zeros a few times.
    char *buffer;
    int samples;
    RtAudioFormat format;

    // Use whichever buffer actually feeds the device (converted or user).
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    memset( buffer, 0, samples * formatBytes(format) );
    for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      if ( result == -1 ) {
        errorText_ = "RtApiOss::stopStream: audio write error.";
        error( RtAudioError::WARNING );
      }
    }

    result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
    handle->triggered = false;
  }

  // Halt the input side too unless duplex shares a single descriptor.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
    result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  // Always mark stopped and release the mutex, even on ioctl failure.
  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result != -1 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
10096
 
10097
// Abort the stream: halt the device(s) immediately, discarding any
// queued audio.  Compare stopStream(), which drains the output first.
void RtApiOss :: abortStream()
{
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) {
    MUTEX_UNLOCK( &stream_.mutex );
    return;
  }

  int result = 0;
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
    handle->triggered = false;
  }

  // Halt the input side too unless duplex shares a single descriptor.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
    result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
    if ( result == -1 ) {
      errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  // Always mark stopped and release the mutex, even on ioctl failure.
  stream_.state = STREAM_STOPPED;
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result != -1 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
10142
 
10143
void RtApiOss :: callbackEvent()
{
  // One iteration of the audio callback loop: invoke the user callback,
  // then write output samples to and/or read input samples from the OSS
  // device, doing any required format conversion and byte swapping.
  OssHandle *handle = (OssHandle *) stream_.apiHandle;
  if ( stream_.state == STREAM_STOPPED ) {
    // Stream is stopped: block on the condition variable until
    // startStream() signals it, then re-check the state.
    MUTEX_LOCK( &stream_.mutex );
    pthread_cond_wait( &handle->runnable, &stream_.mutex );
    if ( stream_.state != STREAM_RUNNING ) {
      MUTEX_UNLOCK( &stream_.mutex );
      return;
    }
    MUTEX_UNLOCK( &stream_.mutex );
  }

  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
    return;
  }

  // Invoke user callback to get fresh output data.
  int doStopStream = 0;
  RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
  double streamTime = getStreamTime();
  RtAudioStreamStatus status = 0;
  // Report and clear any under/overflow flags set by the OSS backend.
  if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
    status |= RTAUDIO_OUTPUT_UNDERFLOW;
    handle->xrun[0] = false;
  }
  if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
    status |= RTAUDIO_INPUT_OVERFLOW;
    handle->xrun[1] = false;
  }
  doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                           stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
  // Callback return of 2 requests an immediate abort (no drain).
  if ( doStopStream == 2 ) {
    this->abortStream();
    return;
  }

  MUTEX_LOCK( &stream_.mutex );

  // The state might change while waiting on a mutex.
  if ( stream_.state == STREAM_STOPPED ) goto unlock;

  int result;
  char *buffer;
  int samples;
  RtAudioFormat format;

  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // Setup parameters and do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[0] ) {
      buffer = stream_.deviceBuffer;
      convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      samples = stream_.bufferSize * stream_.nDeviceChannels[0];
      format = stream_.deviceFormat[0];
    }
    else {
      buffer = stream_.userBuffer[0];
      samples = stream_.bufferSize * stream_.nUserChannels[0];
      format = stream_.userFormat;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[0] )
      byteSwapBuffer( buffer, samples, format );

    if ( stream_.mode == DUPLEX && handle->triggered == false ) {
      // First duplex write: prime the device with one buffer while
      // triggering is disabled, then enable input+output together so
      // both directions start in sync.
      int trig = 0;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      result = write( handle->id[0], buffer, samples * formatBytes(format) );
      trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
      ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
      handle->triggered = true;
    }
    else
      // Write samples to device.
      result = write( handle->id[0], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an underrun, though there isn't a
      // specific means for determining that.
      handle->xrun[0] = true;
      errorText_ = "RtApiOss::callbackEvent: audio write error.";
      error( RtAudioError::WARNING );
      // Continue on to input section.
    }
  }

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    // Setup parameters.
    if ( stream_.doConvertBuffer[1] ) {
      buffer = stream_.deviceBuffer;
      samples = stream_.bufferSize * stream_.nDeviceChannels[1];
      format = stream_.deviceFormat[1];
    }
    else {
      buffer = stream_.userBuffer[1];
      samples = stream_.bufferSize * stream_.nUserChannels[1];
      format = stream_.userFormat;
    }

    // Read samples from device.
    result = read( handle->id[1], buffer, samples * formatBytes(format) );

    if ( result == -1 ) {
      // We'll assume this is an overrun, though there isn't a
      // specific means for determining that.
      handle->xrun[1] = true;
      errorText_ = "RtApiOss::callbackEvent: audio read error.";
      error( RtAudioError::WARNING );
      goto unlock;
    }

    // Do byte swapping if necessary.
    if ( stream_.doByteSwap[1] )
      byteSwapBuffer( buffer, samples, format );

    // Do buffer conversion if necessary.
    if ( stream_.doConvertBuffer[1] )
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
  }

 unlock:
  MUTEX_UNLOCK( &stream_.mutex );

  // Advance the stream time by one buffer; outside the lock by design.
  RtApi::tickStreamTime();
  // Callback return of 1 requests a (draining) stop.
  if ( doStopStream == 1 ) this->stopStream();
}
10274
 
10275
static void *ossCallbackHandler( void *ptr )
10276
{
10277
  CallbackInfo *info = (CallbackInfo *) ptr;
10278
  RtApiOss *object = (RtApiOss *) info->object;
10279
  bool *isRunning = &info->isRunning;
10280
 
10281
#ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
10282
  if (info->doRealtime) {
10283
    std::cerr << "RtAudio oss: " <<
10284
             (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
10285
             "running realtime scheduling" << std::endl;
10286
  }
10287
#endif
10288
 
10289
  while ( *isRunning == true ) {
10290
    pthread_testcancel();
10291
    object->callbackEvent();
10292
  }
10293
 
10294
  pthread_exit( NULL );
10295
}
10296
 
10297
//******************** End of __LINUX_OSS__ *********************//
10298
#endif
10299
 
10300
 
10301
// *************************************************** //
10302
//
10303
// Protected common (OS-independent) RtAudio methods.
10304
//
10305
// *************************************************** //
10306
 
10307
// This method can be modified to control the behavior of error
10308
// message printing.
10309
// This method can be modified to control the behavior of error
// message printing.
void RtApi :: error( RtAudioError::Type type )
{
  // Central error dispatcher: routes errorText_ either to a user-supplied
  // error callback, to stderr (warnings), or throws an RtAudioError.
  errorStream_.str(""); // clear the ostringstream

  RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
  if ( errorCallback ) {
    // abortStream() can generate new error messages. Ignore them. Just keep original one.

    // Re-entrancy guard: a nested error() call (from abortStream below)
    // returns immediately so the original message is preserved.
    if ( firstErrorOccurred_ )
      return;

    firstErrorOccurred_ = true;
    // Copy the message now, before abortStream() can overwrite errorText_.
    const std::string errorMessage = errorText_;

    if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
      stream_.callbackInfo.isRunning = false; // exit from the thread
      abortStream();
    }

    errorCallback( type, errorMessage );
    firstErrorOccurred_ = false;
    return;
  }

  // No callback registered: warnings go to stderr (if enabled),
  // anything more severe is thrown as an exception.
  if ( type == RtAudioError::WARNING && showWarnings_ == true )
    std::cerr << '\n' << errorText_ << "\n\n";
  else if ( type != RtAudioError::WARNING )
    throw( RtAudioError( errorText_, type ) );
}
10338
 
10339
void RtApi :: verifyStream()
10340
{
10341
  if ( stream_.state == STREAM_CLOSED ) {
10342
    errorText_ = "RtApi:: a stream is not open!";
10343
    error( RtAudioError::INVALID_USE );
10344
  }
10345
}
10346
 
10347
void RtApi :: clearStreamInfo()
10348
{
10349
  stream_.mode = UNINITIALIZED;
10350
  stream_.state = STREAM_CLOSED;
10351
  stream_.sampleRate = 0;
10352
  stream_.bufferSize = 0;
10353
  stream_.nBuffers = 0;
10354
  stream_.userFormat = 0;
10355
  stream_.userInterleaved = true;
10356
  stream_.streamTime = 0.0;
10357
  stream_.apiHandle = 0;
10358
  stream_.deviceBuffer = 0;
10359
  stream_.callbackInfo.callback = 0;
10360
  stream_.callbackInfo.userData = 0;
10361
  stream_.callbackInfo.isRunning = false;
10362
  stream_.callbackInfo.errorCallback = 0;
10363
  for ( int i=0; i<2; i++ ) {
10364
    stream_.device[i] = 11111;
10365
    stream_.doConvertBuffer[i] = false;
10366
    stream_.deviceInterleaved[i] = true;
10367
    stream_.doByteSwap[i] = false;
10368
    stream_.nUserChannels[i] = 0;
10369
    stream_.nDeviceChannels[i] = 0;
10370
    stream_.channelOffset[i] = 0;
10371
    stream_.deviceFormat[i] = 0;
10372
    stream_.latency[i] = 0;
10373
    stream_.userBuffer[i] = 0;
10374
    stream_.convertInfo[i].channels = 0;
10375
    stream_.convertInfo[i].inJump = 0;
10376
    stream_.convertInfo[i].outJump = 0;
10377
    stream_.convertInfo[i].inFormat = 0;
10378
    stream_.convertInfo[i].outFormat = 0;
10379
    stream_.convertInfo[i].inOffset.clear();
10380
    stream_.convertInfo[i].outOffset.clear();
10381
  }
10382
}
10383
 
10384
unsigned int RtApi :: formatBytes( RtAudioFormat format )
{
  // Return the byte width of one sample in the given format, or 0
  // (with a warning) for an unrecognized format.
  switch ( format ) {
  case RTAUDIO_SINT8:
    return 1;
  case RTAUDIO_SINT16:
    return 2;
  case RTAUDIO_SINT24:
    return 3;
  case RTAUDIO_SINT32:
  case RTAUDIO_FLOAT32:
    return 4;
  case RTAUDIO_FLOAT64:
    return 8;
  }

  errorText_ = "RtApi::formatBytes: undefined format.";
  error( RtAudioError::WARNING );

  return 0;
}
10402
 
10403
void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
{
  // Populate stream_.convertInfo[mode] with the jumps (per-frame strides)
  // and per-channel offsets that convertBuffer() uses to copy between the
  // user and device buffers, honoring channel counts, (de)interleaving,
  // and a first-channel offset.
  if ( mode == INPUT ) { // convert device to user buffer
    stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
    stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
    stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
    stream_.convertInfo[mode].outFormat = stream_.userFormat;
  }
  else { // convert user to device buffer
    stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
    stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
    stream_.convertInfo[mode].inFormat = stream_.userFormat;
    stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
  }

  // Convert only as many channels as both sides have (the smaller count).
  if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
  else
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;

  // Set up the interleave/deinterleave offsets.
  if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
    // One side is interleaved, the other is not.  The non-interleaved
    // side is laid out as contiguous per-channel planes of bufferSize
    // samples, hence the k * bufferSize offsets and a jump of 1.
    if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
         ( mode == INPUT && stream_.userInterleaved ) ) {
      // Input side is planar (non-interleaved), output side interleaved.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k );
        stream_.convertInfo[mode].inJump = 1;
      }
    }
    else {
      // Input side interleaved, output side planar.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }
  else { // no (de)interleaving
    if ( stream_.userInterleaved ) {
      // Both interleaved: channel k lives at offset k within each frame.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k );
      }
    }
    else {
      // Both planar: channel k is a whole plane; advance one sample per frame.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].inJump = 1;
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }

  // Add channel offset.
  // Shift the device-side offsets so the stream starts at firstChannel:
  // by channel for interleaved devices, by whole planes otherwise.
  if ( firstChannel > 0 ) {
    if ( stream_.deviceInterleaved[mode] ) {
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += firstChannel;
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += firstChannel;
      }
    }
    else {
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += ( firstChannel  * stream_.bufferSize );
      }
    }
  }
}
10482
 
10483
void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10484
{
10485
  // This function does format conversion, input/output channel compensation, and
10486
  // data interleaving/deinterleaving.  24-bit integers are assumed to occupy
10487
  // the lower three bytes of a 32-bit integer.
10488
 
10489
  // Clear our duplex device output buffer if there are more device outputs than user outputs
10490
  if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX && info.outJump > info.inJump )
10491
    memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10492
 
10493
  int j;
10494
  if (info.outFormat == RTAUDIO_FLOAT64) {
10495
    Float64 *out = (Float64 *)outBuffer;
10496
 
10497
    if (info.inFormat == RTAUDIO_SINT8) {
10498
      signed char *in = (signed char *)inBuffer;
10499
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10500
        for (j=0; j<info.channels; j++) {
10501
          out[info.outOffset[j]] = (Float64) in[info.inOffset[j]] / 128.0;
10502
        }
10503
        in += info.inJump;
10504
        out += info.outJump;
10505
      }
10506
    }
10507
    else if (info.inFormat == RTAUDIO_SINT16) {
10508
      Int16 *in = (Int16 *)inBuffer;
10509
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10510
        for (j=0; j<info.channels; j++) {
10511
          out[info.outOffset[j]] = (Float64) in[info.inOffset[j]] / 32768.0;
10512
        }
10513
        in += info.inJump;
10514
        out += info.outJump;
10515
      }
10516
    }
10517
    else if (info.inFormat == RTAUDIO_SINT24) {
10518
      Int24 *in = (Int24 *)inBuffer;
10519
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10520
        for (j=0; j<info.channels; j++) {
10521
          out[info.outOffset[j]] = (Float64) in[info.inOffset[j]].asInt() / 8388608.0;
10522
        }
10523
        in += info.inJump;
10524
        out += info.outJump;
10525
      }
10526
    }
10527
    else if (info.inFormat == RTAUDIO_SINT32) {
10528
      Int32 *in = (Int32 *)inBuffer;
10529
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10530
        for (j=0; j<info.channels; j++) {
10531
          out[info.outOffset[j]] = (Float64) in[info.inOffset[j]] / 2147483648.0;
10532
        }
10533
        in += info.inJump;
10534
        out += info.outJump;
10535
      }
10536
    }
10537
    else if (info.inFormat == RTAUDIO_FLOAT32) {
10538
      Float32 *in = (Float32 *)inBuffer;
10539
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10540
        for (j=0; j<info.channels; j++) {
10541
          out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10542
        }
10543
        in += info.inJump;
10544
        out += info.outJump;
10545
      }
10546
    }
10547
    else if (info.inFormat == RTAUDIO_FLOAT64) {
10548
      // Channel compensation and/or (de)interleaving only.
10549
      Float64 *in = (Float64 *)inBuffer;
10550
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10551
        for (j=0; j<info.channels; j++) {
10552
          out[info.outOffset[j]] = in[info.inOffset[j]];
10553
        }
10554
        in += info.inJump;
10555
        out += info.outJump;
10556
      }
10557
    }
10558
  }
10559
  else if (info.outFormat == RTAUDIO_FLOAT32) {
10560
    Float32 *out = (Float32 *)outBuffer;
10561
 
10562
    if (info.inFormat == RTAUDIO_SINT8) {
10563
      signed char *in = (signed char *)inBuffer;
10564
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10565
        for (j=0; j<info.channels; j++) {
10566
          out[info.outOffset[j]] = (Float32) in[info.inOffset[j]] / 128.f;
10567
        }
10568
        in += info.inJump;
10569
        out += info.outJump;
10570
      }
10571
    }
10572
    else if (info.inFormat == RTAUDIO_SINT16) {
10573
      Int16 *in = (Int16 *)inBuffer;
10574
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10575
        for (j=0; j<info.channels; j++) {
10576
          out[info.outOffset[j]] = (Float32) in[info.inOffset[j]] / 32768.f;
10577
        }
10578
        in += info.inJump;
10579
        out += info.outJump;
10580
      }
10581
    }
10582
    else if (info.inFormat == RTAUDIO_SINT24) {
10583
      Int24 *in = (Int24 *)inBuffer;
10584
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10585
        for (j=0; j<info.channels; j++) {
10586
          out[info.outOffset[j]] = (Float32) in[info.inOffset[j]].asInt() / 8388608.f;
10587
        }
10588
        in += info.inJump;
10589
        out += info.outJump;
10590
      }
10591
    }
10592
    else if (info.inFormat == RTAUDIO_SINT32) {
10593
      Int32 *in = (Int32 *)inBuffer;
10594
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10595
        for (j=0; j<info.channels; j++) {
10596
          out[info.outOffset[j]] = (Float32) in[info.inOffset[j]] / 2147483648.f;
10597
        }
10598
        in += info.inJump;
10599
        out += info.outJump;
10600
      }
10601
    }
10602
    else if (info.inFormat == RTAUDIO_FLOAT32) {
10603
      // Channel compensation and/or (de)interleaving only.
10604
      Float32 *in = (Float32 *)inBuffer;
10605
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10606
        for (j=0; j<info.channels; j++) {
10607
          out[info.outOffset[j]] = in[info.inOffset[j]];
10608
        }
10609
        in += info.inJump;
10610
        out += info.outJump;
10611
      }
10612
    }
10613
    else if (info.inFormat == RTAUDIO_FLOAT64) {
10614
      Float64 *in = (Float64 *)inBuffer;
10615
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10616
        for (j=0; j<info.channels; j++) {
10617
          out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10618
        }
10619
        in += info.inJump;
10620
        out += info.outJump;
10621
      }
10622
    }
10623
  }
10624
  else if (info.outFormat == RTAUDIO_SINT32) {
10625
    Int32 *out = (Int32 *)outBuffer;
10626
    if (info.inFormat == RTAUDIO_SINT8) {
10627
      signed char *in = (signed char *)inBuffer;
10628
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10629
        for (j=0; j<info.channels; j++) {
10630
          out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10631
          out[info.outOffset[j]] <<= 24;
10632
        }
10633
        in += info.inJump;
10634
        out += info.outJump;
10635
      }
10636
    }
10637
    else if (info.inFormat == RTAUDIO_SINT16) {
10638
      Int16 *in = (Int16 *)inBuffer;
10639
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10640
        for (j=0; j<info.channels; j++) {
10641
          out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10642
          out[info.outOffset[j]] <<= 16;
10643
        }
10644
        in += info.inJump;
10645
        out += info.outJump;
10646
      }
10647
    }
10648
    else if (info.inFormat == RTAUDIO_SINT24) {
10649
      Int24 *in = (Int24 *)inBuffer;
10650
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10651
        for (j=0; j<info.channels; j++) {
10652
          out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10653
          out[info.outOffset[j]] <<= 8;
10654
        }
10655
        in += info.inJump;
10656
        out += info.outJump;
10657
      }
10658
    }
10659
    else if (info.inFormat == RTAUDIO_SINT32) {
10660
      // Channel compensation and/or (de)interleaving only.
10661
      Int32 *in = (Int32 *)inBuffer;
10662
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10663
        for (j=0; j<info.channels; j++) {
10664
          out[info.outOffset[j]] = in[info.inOffset[j]];
10665
        }
10666
        in += info.inJump;
10667
        out += info.outJump;
10668
      }
10669
    }
10670
    else if (info.inFormat == RTAUDIO_FLOAT32) {
10671
      Float32 *in = (Float32 *)inBuffer;
10672
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10673
        for (j=0; j<info.channels; j++) {
10674
          // Use llround() which returns `long long` which is guaranteed to be at least 64 bits.
10675
          out[info.outOffset[j]] = (Int32) std::min(std::llround(in[info.inOffset[j]] * 2147483648.f), 2147483647LL);
10676
        }
10677
        in += info.inJump;
10678
        out += info.outJump;
10679
      }
10680
    }
10681
    else if (info.inFormat == RTAUDIO_FLOAT64) {
10682
      Float64 *in = (Float64 *)inBuffer;
10683
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10684
        for (j=0; j<info.channels; j++) {
10685
          out[info.outOffset[j]] = (Int32) std::min(std::llround(in[info.inOffset[j]] * 2147483648.0), 2147483647LL);
10686
        }
10687
        in += info.inJump;
10688
        out += info.outJump;
10689
      }
10690
    }
10691
  }
10692
  else if (info.outFormat == RTAUDIO_SINT24) {
10693
    Int24 *out = (Int24 *)outBuffer;
10694
    if (info.inFormat == RTAUDIO_SINT8) {
10695
      signed char *in = (signed char *)inBuffer;
10696
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10697
        for (j=0; j<info.channels; j++) {
10698
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10699
          //out[info.outOffset[j]] <<= 16;
10700
        }
10701
        in += info.inJump;
10702
        out += info.outJump;
10703
      }
10704
    }
10705
    else if (info.inFormat == RTAUDIO_SINT16) {
10706
      Int16 *in = (Int16 *)inBuffer;
10707
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10708
        for (j=0; j<info.channels; j++) {
10709
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10710
          //out[info.outOffset[j]] <<= 8;
10711
        }
10712
        in += info.inJump;
10713
        out += info.outJump;
10714
      }
10715
    }
10716
    else if (info.inFormat == RTAUDIO_SINT24) {
10717
      // Channel compensation and/or (de)interleaving only.
10718
      Int24 *in = (Int24 *)inBuffer;
10719
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10720
        for (j=0; j<info.channels; j++) {
10721
          out[info.outOffset[j]] = in[info.inOffset[j]];
10722
        }
10723
        in += info.inJump;
10724
        out += info.outJump;
10725
      }
10726
    }
10727
    else if (info.inFormat == RTAUDIO_SINT32) {
10728
      Int32 *in = (Int32 *)inBuffer;
10729
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10730
        for (j=0; j<info.channels; j++) {
10731
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10732
          //out[info.outOffset[j]] >>= 8;
10733
        }
10734
        in += info.inJump;
10735
        out += info.outJump;
10736
      }
10737
    }
10738
    else if (info.inFormat == RTAUDIO_FLOAT32) {
10739
      Float32 *in = (Float32 *)inBuffer;
10740
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10741
        for (j=0; j<info.channels; j++) {
10742
          out[info.outOffset[j]] = (Int32) std::min(std::llround(in[info.inOffset[j]] * 8388608.f), 8388607LL);
10743
        }
10744
        in += info.inJump;
10745
        out += info.outJump;
10746
      }
10747
    }
10748
    else if (info.inFormat == RTAUDIO_FLOAT64) {
10749
      Float64 *in = (Float64 *)inBuffer;
10750
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10751
        for (j=0; j<info.channels; j++) {
10752
          out[info.outOffset[j]] = (Int32) std::min(std::llround(in[info.inOffset[j]] * 8388608.0), 8388607LL);
10753
        }
10754
        in += info.inJump;
10755
        out += info.outJump;
10756
      }
10757
    }
10758
  }
10759
  else if (info.outFormat == RTAUDIO_SINT16) {
10760
    Int16 *out = (Int16 *)outBuffer;
10761
    if (info.inFormat == RTAUDIO_SINT8) {
10762
      signed char *in = (signed char *)inBuffer;
10763
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10764
        for (j=0; j<info.channels; j++) {
10765
          out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10766
          out[info.outOffset[j]] <<= 8;
10767
        }
10768
        in += info.inJump;
10769
        out += info.outJump;
10770
      }
10771
    }
10772
    else if (info.inFormat == RTAUDIO_SINT16) {
10773
      // Channel compensation and/or (de)interleaving only.
10774
      Int16 *in = (Int16 *)inBuffer;
10775
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10776
        for (j=0; j<info.channels; j++) {
10777
          out[info.outOffset[j]] = in[info.inOffset[j]];
10778
        }
10779
        in += info.inJump;
10780
        out += info.outJump;
10781
      }
10782
    }
10783
    else if (info.inFormat == RTAUDIO_SINT24) {
10784
      Int24 *in = (Int24 *)inBuffer;
10785
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10786
        for (j=0; j<info.channels; j++) {
10787
          out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10788
        }
10789
        in += info.inJump;
10790
        out += info.outJump;
10791
      }
10792
    }
10793
    else if (info.inFormat == RTAUDIO_SINT32) {
10794
      Int32 *in = (Int32 *)inBuffer;
10795
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10796
        for (j=0; j<info.channels; j++) {
10797
          out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10798
        }
10799
        in += info.inJump;
10800
        out += info.outJump;
10801
      }
10802
    }
10803
    else if (info.inFormat == RTAUDIO_FLOAT32) {
10804
      Float32 *in = (Float32 *)inBuffer;
10805
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10806
        for (j=0; j<info.channels; j++) {
10807
          out[info.outOffset[j]] = (Int16) std::min(std::llround(in[info.inOffset[j]] * 32768.f), 32767LL);
10808
        }
10809
        in += info.inJump;
10810
        out += info.outJump;
10811
      }
10812
    }
10813
    else if (info.inFormat == RTAUDIO_FLOAT64) {
10814
      Float64 *in = (Float64 *)inBuffer;
10815
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10816
        for (j=0; j<info.channels; j++) {
10817
          out[info.outOffset[j]] = (Int16) std::min(std::llround(in[info.inOffset[j]] * 32768.0), 32767LL);
10818
        }
10819
        in += info.inJump;
10820
        out += info.outJump;
10821
      }
10822
    }
10823
  }
10824
  else if (info.outFormat == RTAUDIO_SINT8) {
10825
    signed char *out = (signed char *)outBuffer;
10826
    if (info.inFormat == RTAUDIO_SINT8) {
10827
      // Channel compensation and/or (de)interleaving only.
10828
      signed char *in = (signed char *)inBuffer;
10829
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10830
        for (j=0; j<info.channels; j++) {
10831
          out[info.outOffset[j]] = in[info.inOffset[j]];
10832
        }
10833
        in += info.inJump;
10834
        out += info.outJump;
10835
      }
10836
    }
10837
    if (info.inFormat == RTAUDIO_SINT16) {
10838
      Int16 *in = (Int16 *)inBuffer;
10839
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10840
        for (j=0; j<info.channels; j++) {
10841
          out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10842
        }
10843
        in += info.inJump;
10844
        out += info.outJump;
10845
      }
10846
    }
10847
    else if (info.inFormat == RTAUDIO_SINT24) {
10848
      Int24 *in = (Int24 *)inBuffer;
10849
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10850
        for (j=0; j<info.channels; j++) {
10851
          out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10852
        }
10853
        in += info.inJump;
10854
        out += info.outJump;
10855
      }
10856
    }
10857
    else if (info.inFormat == RTAUDIO_SINT32) {
10858
      Int32 *in = (Int32 *)inBuffer;
10859
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10860
        for (j=0; j<info.channels; j++) {
10861
          out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10862
        }
10863
        in += info.inJump;
10864
        out += info.outJump;
10865
      }
10866
    }
10867
    else if (info.inFormat == RTAUDIO_FLOAT32) {
10868
      Float32 *in = (Float32 *)inBuffer;
10869
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10870
        for (j=0; j<info.channels; j++) {
10871
          out[info.outOffset[j]] = (signed char) std::min(std::llround(in[info.inOffset[j]] * 128.f), 127LL);
10872
        }
10873
        in += info.inJump;
10874
        out += info.outJump;
10875
      }
10876
    }
10877
    else if (info.inFormat == RTAUDIO_FLOAT64) {
10878
      Float64 *in = (Float64 *)inBuffer;
10879
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
10880
        for (j=0; j<info.channels; j++) {
10881
          out[info.outOffset[j]] = (signed char) std::min(std::llround(in[info.inOffset[j]] * 128.0), 127LL);
10882
        }
10883
        in += info.inJump;
10884
        out += info.outJump;
10885
      }
10886
    }
10887
  }
10888
}
10889
 
10890
//static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10891
//static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10892
//static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10893
 
10894
void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10895
{
10896
  char val;
10897
  char *ptr;
10898
 
10899
  ptr = buffer;
10900
  if ( format == RTAUDIO_SINT16 ) {
10901
    for ( unsigned int i=0; i<samples; i++ ) {
10902
      // Swap 1st and 2nd bytes.
10903
      val = *(ptr);
10904
      *(ptr) = *(ptr+1);
10905
      *(ptr+1) = val;
10906
 
10907
      // Increment 2 bytes.
10908
      ptr += 2;
10909
    }
10910
  }
10911
  else if ( format == RTAUDIO_SINT32 ||
10912
            format == RTAUDIO_FLOAT32 ) {
10913
    for ( unsigned int i=0; i<samples; i++ ) {
10914
      // Swap 1st and 4th bytes.
10915
      val = *(ptr);
10916
      *(ptr) = *(ptr+3);
10917
      *(ptr+3) = val;
10918
 
10919
      // Swap 2nd and 3rd bytes.
10920
      ptr += 1;
10921
      val = *(ptr);
10922
      *(ptr) = *(ptr+1);
10923
      *(ptr+1) = val;
10924
 
10925
      // Increment 3 more bytes.
10926
      ptr += 3;
10927
    }
10928
  }
10929
  else if ( format == RTAUDIO_SINT24 ) {
10930
    for ( unsigned int i=0; i<samples; i++ ) {
10931
      // Swap 1st and 3rd bytes.
10932
      val = *(ptr);
10933
      *(ptr) = *(ptr+2);
10934
      *(ptr+2) = val;
10935
 
10936
      // Increment 2 more bytes.
10937
      ptr += 2;
10938
    }
10939
  }
10940
  else if ( format == RTAUDIO_FLOAT64 ) {
10941
    for ( unsigned int i=0; i<samples; i++ ) {
10942
      // Swap 1st and 8th bytes
10943
      val = *(ptr);
10944
      *(ptr) = *(ptr+7);
10945
      *(ptr+7) = val;
10946
 
10947
      // Swap 2nd and 7th bytes
10948
      ptr += 1;
10949
      val = *(ptr);
10950
      *(ptr) = *(ptr+5);
10951
      *(ptr+5) = val;
10952
 
10953
      // Swap 3rd and 6th bytes
10954
      ptr += 1;
10955
      val = *(ptr);
10956
      *(ptr) = *(ptr+3);
10957
      *(ptr+3) = val;
10958
 
10959
      // Swap 4th and 5th bytes
10960
      ptr += 1;
10961
      val = *(ptr);
10962
      *(ptr) = *(ptr+1);
10963
      *(ptr+1) = val;
10964
 
10965
      // Increment 5 more bytes.
10966
      ptr += 5;
10967
    }
10968
  }
10969
}
10970
 
10971
  // Indentation settings for Vim and Emacs
  //
  // Local Variables:
  // c-basic-offset: 2
  // indent-tabs-mode: nil
  // End:
  //
  // vim: et sts=2 sw=2