[关闭]
@chenlai89 2018-05-30T03:08:45.000000Z 字数 181223 阅读 1075

code

kindle


  1. /*
  2. * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
  3. *
  4. * Use of this source code is governed by a BSD-style license
  5. * that can be found in the LICENSE file in the root of the source
  6. * tree. An additional intellectual property rights grant can be found
  7. * in the file PATENTS. All contributing project authors may
  8. * be found in the AUTHORS file in the root of the source tree.
  9. */
  10. #include "audio_device_utility.h"
  11. #include "audio_device_mac.h"
  12. #include "audio_device_config.h"
  13. #include "event_wrapper.h"
  14. #include "trace.h"
  15. #include "thread_wrapper.h"
  16. #include <cassert>
  17. #include <sys/sysctl.h> // sysctlbyname()
  18. #include <mach/mach.h> // mach_task_self()
  19. #include <libkern/OSAtomic.h> // OSAtomicCompareAndSwap()
  20. #include "portaudio/pa_ringbuffer.h"
  21. #include <CoreServices/CoreServices.h>
  22. #include <mach/mach_time.h>
  23. #include <IOKit/IOKitLib.h>
  24. #include <IOKit/usb/IOUSBLib.h>
  25. #include <IOKit/IOCFPlugIn.h>
  26. //#define TRACKDEVICEDELAY
  27. //#define DEVICE_THREAD_EXCEPTION //1. adds protection for the case where the device is stopped but its callback thread is still running; 2. attention: relies on global variables
  28. namespace webrtc
  29. {
// NOTE(review): force-defining the SDK availability macro pins the visible API
// surface to OS X 10.5 regardless of the SDK actually in use — confirm this
// override is still intended.
#define __MAC_OS_X_VERSION_MAX_ALLOWED 1050
// Evaluate a CoreAudio expression; on any status other than noErr, log the
// four-character error code and make the ENCLOSING function return -1.
// Requires an OSStatus named `err` and the member `_id` to be in scope.
#define WEBRTC_CA_RETURN_ON_ERR(expr) \
do { \
err = expr; \
if (err != noErr) { \
logCAMsg(kTraceError, kTraceAudioDevice, _id, \
"Error in " #expr, (const char *)&err); \
return -1; \
} \
} while(0)
// Same as WEBRTC_CA_RETURN_ON_ERR but only logs the failure (error level)
// and lets execution continue.
#define WEBRTC_CA_LOG_ERR(expr) \
do { \
err = expr; \
if (err != noErr) { \
logCAMsg(kTraceError, kTraceAudioDevice, _id, \
"Error in " #expr, (const char *)&err); \
} \
} while(0)
// Same as WEBRTC_CA_LOG_ERR but logs at warning level.
#define WEBRTC_CA_LOG_WARN(expr) \
do { \
err = expr; \
if (err != noErr) { \
logCAMsg(kTraceWarning, kTraceAudioDevice, _id, \
"Error in " #expr, (const char *)&err); \
} \
} while(0)
// Upper bound on the number of audio devices enumerated from the HAL.
enum
{
MaxNumberDevices = 64
};
// If no device I/O callback arrives within this many milliseconds the device
// is treated as stalled (used by the DEVICE_THREAD_EXCEPTION protection).
enum
{
MaxNoCallbacktime = 30000 //ms
};
// Descriptor for one enumerated audio device as handed to upper layers.
// Strings are UTF-8 and NOT owned by this struct (caller-managed buffers).
typedef struct
{
int8_t *p_name; //device name in utf8
uint32_t len_of_name; //the length of device name
int8_t *p_unique_id; //this id is unique for different device in utf8
uint32_t len_of_unique_id; //length of unique id
bool bCombo; /// if a compound device
int num_of_devices; /// how many devices are compounded
} SSB_MC_DATA_BLOCK_AUDIO_DEVICE_DES, *PSSB_MC_DATA_BLOCK_AUDIO_DEVICE_DES;
// Name prefixes used to recognize the product's virtual loopback audio driver
// among the enumerated devices; they differ per build flavor.
#ifdef BUILD_FOR_BBM
static const char* ZoomAudioDeviceName = "BBMAudioDevice";
static const char* ZoomAudioDeviceName2 = "BBM";
#else
static const char* ZoomAudioDeviceName = "ZoomAudioDevice";
static const char* ZoomAudioDeviceName2 = "Zoom-";
#endif
#ifdef BUILD_FOR_MIMO
static const char* BlackmagicAudioName = "Blackmagic";
static const char* MagewellAudioName = "XI100DUSB-HDMI";
#endif
#ifdef DEVICE_THREAD_EXCEPTION
// Bookkeeping for a running device so a stale callback thread can detect that
// its device was stopped. NOTE(review): these are file-scope GLOBALS shared by
// every AudioDeviceMac instance — not safe with multiple instances; confirm.
struct RunDeviceInfo
{
AudioDeviceID DeviceID;
AudioDeviceIOProcID DeviceIOProcID;
int errorCount;
bool Stopped;
};
RunDeviceInfo RunMicrophoneInfo;
RunDeviceInfo RunSpeakerInfo;
#endif
// Returns true when the host OS is 10.8 (Mountain Lion) or newer.
// The Gestalt system version is BCD-encoded (0x1080 == 10.8.0), so the
// numeric >= comparison matches the intent.
// NOTE(review): Gestalt() is deprecated since 10.8 — verify the returned
// value on 10.10+ where the encoding behavior of gestaltSystemVersion differs.
static bool MacOSMountainLionOrUpper()
{
UInt32 version;
return ((Gestalt(gestaltSystemVersion, (SInt32*) &version) == noErr) && (version >= 0x1080));
}
  100. void AudioDeviceMac::AtomicSet32(int32_t* theValue, int32_t newValue)
  101. {
  102. while (1)
  103. {
  104. int32_t oldValue = *theValue;
  105. if (OSAtomicCompareAndSwap32Barrier(oldValue, newValue, theValue)
  106. == true)
  107. {
  108. return;
  109. }
  110. }
  111. }
  112. int32_t AudioDeviceMac::AtomicGet32(int32_t* theValue)
  113. {
  114. while (1)
  115. {
  116. WebRtc_Word32 value = *theValue;
  117. if (OSAtomicCompareAndSwap32Barrier(value, value, theValue) == true)
  118. {
  119. return value;
  120. }
  121. }
  122. }
// CoreAudio errors are best interpreted as four character strings.
// Logs `msg` followed by the 4-byte OSStatus tag pointed to by `err`.
// On little-endian builds the four bytes are printed in reverse order so the
// tag reads correctly (e.g. 'what' instead of 'tahw').
void AudioDeviceMac::logCAMsg(const TraceLevel level,
const TraceModule module,
const WebRtc_Word32 id, const char *msg,
const char *err)
{
assert(msg != NULL);
assert(err != NULL);
#ifdef WEBRTC_BIG_ENDIAN
WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err);
#else
// We need to flip the characters in this case.
WEBRTC_TRACE(level, module, id, "%s: %.1s%.1s%.1s%.1s", msg, err + 3, err
+ 2, err + 1, err);
#endif
}
// Constructor: sets every member to a safe inert default. No device or HAL
// interaction happens here — that is deferred to Init(). The critical-section
// and event wrappers are heap-created here and freed in the destructor.
AudioDeviceMac::AudioDeviceMac(const WebRtc_Word32 id) :
_ptrAudioBuffer(NULL),
_critSect(*CriticalSectionWrapper::CreateCriticalSection()),
_critSectCb(*CriticalSectionWrapper::CreateCriticalSection()),
_critSectPlayFormatChange(*CriticalSectionWrapper::CreateCriticalSection()),
_critSectNotify(*CriticalSectionWrapper::CreateCriticalSection()),
_critSectFormatChange(NULL),
_stopEventRec(*EventWrapper::Create()),
_stopEvent(*EventWrapper::Create()),
_captureWorkerThread(NULL),
_renderWorkerThread(NULL),
_captureWorkerThreadId(0),
_renderWorkerThreadId(0),
_id(id),
_mixerManager(id),
// Device selection state; actual device IDs are resolved in InitDevice().
_inputDeviceIndex(0),
_inputDeviceIndexUI(0),
_usingDeviceType(false),
_outputDeviceIndex(0),
_inputDeviceID(kAudioObjectUnknown),
_outputDeviceID(kAudioObjectUnknown),
_inputDeviceIsSpecified(false),
_outputDeviceIsSpecified(false),
_recChannels(N_REC_CHANNELS),
_playChannels(N_PLAY_CHANNELS),
_captureBufData(NULL),
_renderBufData(NULL),
_playBufType(AudioDeviceModule::kFixedBufferSize),
// Lifecycle / state flags.
_initialized(false),
_isShutDown(false),
_recording(false),
_playing(false),
_recIsInitialized(false),
_playIsInitialized(false),
_startRec(false),
_stopRec(false),
_stopPlay(false),
_AGC(false),
_renderDeviceIsAlive(1),
_captureDeviceIsAlive(1),
_doStop(false),
_doStopRec(false),
_macBookPro(false),
_macBookProPanRight(false),
// Latency/delay bookkeeping (microseconds / samples).
_captureLatencyUs(0),
_renderLatencyUs(0),
_captureDelayUs(0),
_renderDelayUs(0),
_captureDelayUsUpdate(0),
_captureDelayUsPrevious(0),
_renderDelayOffsetSamples(0),
_playBufDelayFixed(20),
_playWarning(0),
_playError(0),
_recWarning(0),
_recError(0),
_loopbackrecError(0),
_paCaptureBuffer(NULL),
_paRenderBuffer(NULL),
_captureBufSizeSamples(0),
_renderBufSizeSamples(0),
_pDeviceChangeNotify(0),
#ifdef CHECKTIMESTAMPERROR
_timestampErrorCount(0),
_bCheckTimestampError(true),
#endif
//loopback record
_loopbackDeviceIsInitialized(false),
_usingLoopbackDeviceIndex(0),
_loopbackDeviceIndex(0),
_zoomDeviceSpeakerIndex(0),
_zoomDeviceMicIndex(0),
_loopbackDeviceIsSpecified(false),
_loopbackRecording(false),
_zoomDeviceSpeakerID(kAudioObjectUnknown),
_zoomDeviceMicID(kAudioObjectUnknown),
_SystemDefaultSpeakerID(kAudioObjectUnknown),
_loopbackRecIsInitialized(false),
_loopbackCaptureBufData(NULL),
_paLoopbackCaptureBuffer(NULL),
_loopbackCaptureDeviceIsAlive(1),
_doStopLoopbackRec(false),
_loopbackRecChannels(2),
_loopbackCaptureBufDataReadIndex(0),
_loopbackCaptureBufDataWriteIndex(0),
_loopbackCaptureAvailbaleBufData(0),
#ifdef BUILD_FOR_MIMO
_LoopBackDeviceSource(kDefaultLoopbackSource),
_loopbackCaptureBufDataBM(NULL),
#endif
_stopEventLoopbackRec(*EventWrapper::Create()),
_loopbackCaptureWorkerThread(NULL),
_loopbackCaptureWorkerThreadId(0),
_loopbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
_zoomDeviceBufferCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
_bAudioShareStatus(false),
_need_detect(true),
_need_detect_play(true),
//loopback record
_recSameDevice(true),
_recWaitErrorCount(0),
_playWaitErrorCount(0),
_capConvertFailCount(0),
_playConvertFailCount(0),
// Transport-type flags, refreshed in InitSpeaker()/InitMicrophone().
_bBuiltinMic(false),
_bBuiltinSpk(false),
_bHDMISpk(false),
_bBlueSpk(false),
_bBlueMic(false),
_outputTargetLevelDB(0),
_optVolDB(0),
_enableSpkVolumeCheck(false),
_speakerVolumeDB(0),
_spkVolumeCheckFreq(0),
_playCallbackHappened(false),
_stopEventRecAgain(*EventWrapper::Create()),
_stopEventAgain(*EventWrapper::Create()),
_MicrophoneStartTime(0),
_SpeakerStartTime(0),
_recordCallbackHappened(false),
_loopbackLocalSpeakerPlay(true),
_bMicrophoneDefaultStreamFormatChanged(false),
_bUseExclusiveMode(false),
_bDefaultSpeakerIsMuted(false),
_InputTimeNs(0),
_NowTimeNs(0),
_recDataInputTimeNs(0),
_msecOnRecordSide(0),
#ifdef MUTI_MICROPHONE_SUPPORT
_pTmpRecordBuffer(NULL),
_tmpRecordBufferSize(0),
_bMutilChannelsMic(false),
#endif
_msecOnPlaySide(0)
{
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
"%s created", __FUNCTION__);
// NOTE(review): these asserts compare references' addresses against NULL and
// can never fire on a conforming compiler — they look like leftovers.
assert(&_stopEvent != NULL);
assert(&_stopEventRec != NULL);
// Zero every name/GUID buffer and stream-format description so later
// strlen/compare logic never sees garbage.
memset(_renderConvertData, 0, sizeof(_renderConvertData));
memset(_inputDevName,0,sizeof(_inputDevName));
memset(_outputDevName,0,sizeof(_outputDevName));
memset(_inputDevGuid,0,sizeof(_inputDevGuid));
memset(_outputDevGuid,0,sizeof(_outputDevGuid));
memset(&_outStreamFormat, 0, sizeof(AudioStreamBasicDescription));
memset(&_outDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
memset(&_inStreamFormat, 0, sizeof(AudioStreamBasicDescription));
memset(&_inDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
memset(&_loopbackStreamFormat, 0, sizeof(AudioStreamBasicDescription));
memset(&_loopbackDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
memset(&_microphoneDefaultStreamFormat, 0, sizeof(AudioStreamBasicDescription));
}
// Destructor: runs Terminate() if the caller did not, then frees the worker
// threads, ring buffers, semaphores, events and critical sections created in
// the constructor / Init().
AudioDeviceMac::~AudioDeviceMac()
{
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
"%s destroyed", __FUNCTION__);
if (!_isShutDown)
{
Terminate();
}
if (_captureWorkerThread)
{
delete _captureWorkerThread;
_captureWorkerThread = NULL;
}
if (_renderWorkerThread)
{
delete _renderWorkerThread;
_renderWorkerThread = NULL;
}
if (_paRenderBuffer)
{
delete _paRenderBuffer;
_paRenderBuffer = NULL;
}
if (_paCaptureBuffer)
{
delete _paCaptureBuffer;
_paCaptureBuffer = NULL;
}
if (_renderBufData)
{
delete[] _renderBufData;
_renderBufData = NULL;
}
if (_captureBufData)
{
delete[] _captureBufData;
_captureBufData = NULL;
}
// NOTE(review): the semaphores are created in Init(); if Init() was never
// called these handles are uninitialized when destroyed here — confirm that
// the object is always Init()ed before destruction.
kern_return_t kernErr = KERN_SUCCESS;
kernErr = semaphore_destroy(mach_task_self(), _renderSemaphore);
if (kernErr != KERN_SUCCESS)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
" semaphore_destroy() error: %d", kernErr);
}
kernErr = semaphore_destroy(mach_task_self(), _captureSemaphore);
if (kernErr != KERN_SUCCESS)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
" semaphore_destroy() error: %d", kernErr);
}
// The events/critical sections are held by reference; delete through their
// addresses (they were heap-created in the constructor's init list).
delete &_stopEvent;
delete &_stopEventRec;
delete &_critSect;
delete &_critSectCb;
delete &_critSectNotify;
delete &_critSectPlayFormatChange;
if (_critSectFormatChange != NULL)
{
delete _critSectFormatChange;
}
delete &_stopEventLoopbackRec;
delete &_loopbackCritSect;
delete &_zoomDeviceBufferCritSect;
delete &_stopEventRecAgain;
delete &_stopEventAgain;
#ifdef MUTI_MICROPHONE_SUPPORT
if(NULL != _pTmpRecordBuffer)
{
delete [] _pTmpRecordBuffer;
_pTmpRecordBuffer = NULL;
}
#endif
}
  365. // ============================================================================
  366. // API
  367. // ============================================================================
  368. void AudioDeviceMac::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
  369. {
  370. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  371. "%s", __FUNCTION__);
  372. CriticalSectionScoped lock(_critSect);
  373. _ptrAudioBuffer = audioBuffer;
  374. // inform the AudioBuffer about default settings for this implementation
  375. _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
  376. _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
  377. _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
  378. _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
  379. }
  380. WebRtc_Word32 AudioDeviceMac::ActiveAudioLayer(
  381. AudioDeviceModule::AudioLayer& audioLayer) const
  382. {
  383. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  384. "%s", __FUNCTION__);
  385. audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
  386. return 0;
  387. }
  388. #ifdef MUTI_MICROPHONE_SUPPORT
  389. WebRtc_Word32 AudioDeviceMac::Init(bool master)
  390. #else
  391. WebRtc_Word32 AudioDeviceMac::Init()
  392. #endif
  393. {
  394. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  395. "%s", __FUNCTION__);
  396. CriticalSectionScoped lock(_critSect);
  397. if (_initialized)
  398. {
  399. return 0;
  400. }
  401. OSStatus err = noErr;
  402. _isShutDown = false;
  403. // PortAudio ring buffers require an elementCount which is a power of two.
  404. if (_renderBufData == NULL)
  405. {
  406. UInt32 powerOfTwo = 1;
  407. while (powerOfTwo < PLAY_BUF_SIZE_IN_SAMPLES)
  408. {
  409. powerOfTwo <<= 1;
  410. }
  411. _renderBufSizeSamples = powerOfTwo;
  412. _renderBufData = new SInt16[_renderBufSizeSamples];
  413. }
  414. if (_paRenderBuffer == NULL)
  415. {
  416. _paRenderBuffer = new PaUtilRingBuffer;
  417. ring_buffer_size_t bufSize = -1;
  418. bufSize = PaUtil_InitializeRingBuffer(_paRenderBuffer, sizeof(SInt16),
  419. _renderBufSizeSamples,
  420. _renderBufData);
  421. if (bufSize == -1)
  422. {
  423. WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
  424. _id, " PaUtil_InitializeRingBuffer() error");
  425. return -1;
  426. }
  427. }
  428. if (_captureBufData == NULL)
  429. {
  430. UInt32 powerOfTwo = 1;
  431. while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES)
  432. {
  433. powerOfTwo <<= 1;
  434. }
  435. _captureBufSizeSamples = powerOfTwo;
  436. _captureBufData = new Float32[_captureBufSizeSamples];
  437. }
  438. if (_paCaptureBuffer == NULL)
  439. {
  440. _paCaptureBuffer = new PaUtilRingBuffer;
  441. ring_buffer_size_t bufSize = -1;
  442. bufSize = PaUtil_InitializeRingBuffer(_paCaptureBuffer,
  443. sizeof(Float32),
  444. _captureBufSizeSamples,
  445. _captureBufData);
  446. if (bufSize == -1)
  447. {
  448. WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
  449. _id, " PaUtil_InitializeRingBuffer() error");
  450. return -1;
  451. }
  452. }
  453. if (_renderWorkerThread == NULL)
  454. {
  455. _renderWorkerThread
  456. = ThreadWrapper::CreateThread(RunRender, this, kRealtimePriority,
  457. "RenderWorkerThread");
  458. if (_renderWorkerThread == NULL)
  459. {
  460. WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
  461. _id, " Render CreateThread() error");
  462. return -1;
  463. }
  464. }
  465. if (_captureWorkerThread == NULL)
  466. {
  467. _captureWorkerThread
  468. = ThreadWrapper::CreateThread(RunCapture, this, kRealtimePriority,
  469. "CaptureWorkerThread");
  470. if (_captureWorkerThread == NULL)
  471. {
  472. WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
  473. _id, " Capture CreateThread() error");
  474. return -1;
  475. }
  476. }
  477. kern_return_t kernErr = KERN_SUCCESS;
  478. kernErr = semaphore_create(mach_task_self(), &_renderSemaphore,
  479. SYNC_POLICY_FIFO, 0);
  480. if (kernErr != KERN_SUCCESS)
  481. {
  482. WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
  483. " semaphore_create() error: %d", kernErr);
  484. return -1;
  485. }
  486. kernErr = semaphore_create(mach_task_self(), &_captureSemaphore,
  487. SYNC_POLICY_FIFO, 0);
  488. if (kernErr != KERN_SUCCESS)
  489. {
  490. WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
  491. " semaphore_create() error: %d", kernErr);
  492. return -1;
  493. }
  494. // Setting RunLoop to NULL here instructs HAL to manage its own thread for
  495. // notifications. This was the default behaviour on OS X 10.5 and earlier, but now
  496. // must be explicitly specified. HAL would otherwise try to use the main thread to
  497. // issue notifications.
  498. AudioObjectPropertyAddress propertyAddress = {
  499. kAudioHardwarePropertyRunLoop,
  500. kAudioObjectPropertyScopeGlobal,
  501. kAudioObjectPropertyElementMaster };
  502. CFRunLoopRef runLoop = NULL;
  503. UInt32 size = sizeof(CFRunLoopRef);
  504. WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(kAudioObjectSystemObject,
  505. &propertyAddress, 0, NULL, size, &runLoop));
  506. // Listen for any device changes.
  507. propertyAddress.mSelector = kAudioHardwarePropertyDevices;
  508. WEBRTC_CA_LOG_ERR(AudioObjectAddPropertyListener(kAudioObjectSystemObject,
  509. &propertyAddress, &objectListenerProc, this));
  510. // Determine if this is a MacBook Pro
  511. _macBookPro = false;
  512. _macBookProPanRight = false;
  513. char buf[128];
  514. size_t length = sizeof(buf);
  515. memset(buf, 0, length);
  516. int intErr = sysctlbyname("hw.model", buf, &length, NULL, 0);
  517. if (intErr != 0)
  518. {
  519. WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
  520. " Error in sysctlbyname(): %d", err);
  521. } else
  522. {
  523. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  524. " Hardware model: %s", buf);
  525. if (strncmp(buf, "MacBookPro8", 11) == 0)
  526. // if (strncmp(buf, "MacBookPro", 10) == 0)
  527. {
  528. // _macBookPro = true;
  529. }
  530. }
  531. #ifdef MUTI_MICROPHONE_SUPPORT
  532. if (master)
  533. {
  534. _pdeviceNotifier = new CAudioDevicesNotifier;
  535. CheckAndReplaceZoomDevice();
  536. }
  537. else
  538. {
  539. _pdeviceNotifier = NULL;
  540. }
  541. #else
  542. CheckAndReplaceZoomDevice();
  543. #endif
  544. _playWarning = 0;
  545. _playError = 0;
  546. _recWarning = 0;
  547. _recError = 0;
  548. _loopbackrecError = 0;
  549. _initialized = true;
  550. m_StopDeviceQueue = dispatch_queue_create("StopDeviceQueue", NULL);;
  551. return 0;
  552. }
// Tears down everything Init() set up: force-stops any active streams,
// closes the mixer, unregisters the HAL device listener, unloads CoreAudio
// state and releases the stop-device dispatch queue.
// Returns -1 if a stream had to be force-stopped or unload failed, else 0.
WebRtc_Word32 AudioDeviceMac::Terminate()
{
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
if (!_initialized)
{
return 0;
}
int retVal = 0;
if (_recording)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
" Recording must be stopped");
StopRecording();
retVal = -1;
}
if (_playing)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
" Playback must be stopped");
StopPlayout();
retVal = -1;
}
#ifdef DEVICE_THREAD_EXCEPTION
// Flag the global run-info so any stray device callback thread bails out.
RunMicrophoneInfo.Stopped = true;
RunSpeakerInfo.Stopped = true;
#endif
_critSect.Enter();
_mixerManager.Close();
OSStatus err = noErr;
AudioObjectPropertyAddress propertyAddress = {
kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
kAudioObjectPropertyElementMaster };
// Remove the device hot-plug listener registered in Init().
WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(kAudioObjectSystemObject,
&propertyAddress, &objectListenerProc, this));
err = AudioHardwareUnload();
if (err != noErr)
{
logCAMsg(kTraceError, kTraceAudioDevice, _id,
"Error in AudioHardwareUnload()", (const char*) &err);
retVal = -1;
}
_critSect.Leave();
#ifdef MUTI_MICROPHONE_SUPPORT
if (_pdeviceNotifier)
{
delete _pdeviceNotifier;
_pdeviceNotifier = NULL;
}
#endif
_isShutDown = true;
_initialized = false;
_outputDeviceIsSpecified = false;
_inputDeviceIsSpecified = false;
if (m_StopDeviceQueue)
{
// NOTE(review): released but not reset to NULL — safe only because Init()
// reassigns it before the next use; confirm Terminate() is never called twice
// without an intervening Init().
dispatch_release(m_StopDeviceQueue);
}
return retVal;
}
  613. bool AudioDeviceMac::Initialized() const
  614. {
  615. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  616. "%s", __FUNCTION__);
  617. return (_initialized);
  618. }
  619. WebRtc_Word32 AudioDeviceMac::SpeakerIsAvailable(bool& available)
  620. {
  621. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  622. "%s", __FUNCTION__);
  623. bool wasInitialized = _mixerManager.SpeakerIsInitialized();
  624. // Make an attempt to open up the
  625. // output mixer corresponding to the currently selected output device.
  626. //
  627. if (!wasInitialized && InitSpeaker() == -1)
  628. {
  629. available = false;
  630. return 0;
  631. }
  632. // Given that InitSpeaker was successful, we know that a valid speaker exists
  633. //
  634. available = true;
  635. // Close the initialized output mixer
  636. //
  637. if (!wasInitialized)
  638. {
  639. _mixerManager.CloseSpeaker();
  640. }
  641. return 0;
  642. }
  643. WebRtc_Word32 AudioDeviceMac::InitSpeaker()
  644. {
  645. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  646. "%s", __FUNCTION__);
  647. CriticalSectionScoped lock(_critSect);
  648. if (_playing)
  649. {
  650. return -1;
  651. }
  652. if (InitDevice(_outputDeviceIndex, _outputDeviceID, false) == -1)
  653. {
  654. return -1;
  655. }
  656. if (_mixerManager.OpenSpeaker(_outputDeviceID) == -1)
  657. {
  658. return -1;
  659. }
  660. #ifdef MUTI_MICROPHONE_SUPPORT
  661. if (_pdeviceNotifier)
  662. {
  663. _pdeviceNotifier->SetUsedDeviceID(IAudioDeviceChangeNotify::kRender,_outputDeviceID);
  664. }
  665. #else
  666. _deviceNotifier.SetUsedDeviceID(IAudioDeviceChangeNotify::kRender,_outputDeviceID);
  667. #endif
  668. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  669. " InitSpeaker() _inputDeviceID:%d",_outputDeviceID);
  670. _bBuiltinSpk = false;
  671. _bHDMISpk = false;
  672. _bBlueSpk = false;
  673. UInt32 transportType;
  674. AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyTransportType,kAudioDevicePropertyScopeOutput, 0 };
  675. UInt32 size = sizeof(UInt32);
  676. OSStatus err = AudioObjectGetPropertyData(_outputDeviceID,&propertyAddress, 0, NULL, &size, &transportType);
  677. if (err == noErr) {
  678. if (transportType == 'bltn') {
  679. _bBuiltinSpk = true;
  680. }
  681. if (transportType == 'hdmi') {
  682. _bHDMISpk = true;
  683. }
  684. if (transportType == 'blue')
  685. {
  686. _bBlueSpk = true;
  687. }
  688. }
  689. return 0;
  690. }
  691. bool AudioDeviceMac::BuiltInSpk()
  692. {
  693. if (SpeakerIsInitialized())
  694. {
  695. return _bBuiltinSpk;
  696. }
  697. return false;
  698. }
  699. bool AudioDeviceMac::HDMISpk()
  700. {
  701. if (SpeakerIsInitialized())
  702. {
  703. return _bHDMISpk;
  704. }
  705. return false;
  706. }
  707. bool AudioDeviceMac::BlueSpk()
  708. {
  709. if (SpeakerIsInitialized())
  710. {
  711. return _bBlueSpk;
  712. }
  713. return false;
  714. }
  715. WebRtc_Word32 AudioDeviceMac::SetOutputTargetLevelDB(int32_t optVolDB,int targetLevelDB)
  716. {
  717. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s targetLevelDB = %d, optVolDB = %d", __FUNCTION__, targetLevelDB, optVolDB);
  718. _optVolDB = optVolDB;
  719. _outputTargetLevelDB = targetLevelDB;
  720. return 0;
  721. }
  722. WebRtc_Word32 AudioDeviceMac::EnableSpeakerVolumeCheck(bool bEnable)
  723. {
  724. _enableSpkVolumeCheck = bEnable;
  725. return 0;
  726. }
  727. WebRtc_Word32 AudioDeviceMac::SpeakerVolumeWithDB(WebRtc_Word32& speakerVolumeDB)
  728. {
  729. return _mixerManager.SpeakerVolumeWithDB(speakerVolumeDB);
  730. }
  731. WebRtc_Word32 AudioDeviceMac::MicrophoneIsAvailable(bool& available)
  732. {
  733. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  734. "%s", __FUNCTION__);
  735. bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
  736. // Make an attempt to open up the
  737. // input mixer corresponding to the currently selected output device.
  738. //
  739. if (!wasInitialized && InitMicrophone() == -1)
  740. {
  741. available = false;
  742. return 0;
  743. }
  744. // Given that InitMicrophone was successful, we know that a valid microphone exists
  745. //
  746. available = true;
  747. // Close the initialized input mixer
  748. //
  749. if (!wasInitialized)
  750. {
  751. _mixerManager.CloseMicrophone();
  752. }
  753. return 0;
  754. }
  755. WebRtc_Word32 AudioDeviceMac::InitMicrophone()
  756. {
  757. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  758. "%s", __FUNCTION__);
  759. CriticalSectionScoped lock(_critSect);
  760. if (_recording)
  761. {
  762. return -1;
  763. }
  764. if (InitDevice(_inputDeviceIndex, _inputDeviceID, true) == -1)
  765. {
  766. return -1;
  767. }
  768. if (_mixerManager.OpenMicrophone(_inputDeviceID) == -1)
  769. {
  770. return -1;
  771. }
  772. #ifdef MUTI_MICROPHONE_SUPPORT
  773. if (_pdeviceNotifier)
  774. {
  775. _pdeviceNotifier->SetUsedDeviceID(IAudioDeviceChangeNotify::kCapture,_inputDeviceID);
  776. }
  777. #else
  778. _deviceNotifier.SetUsedDeviceID(IAudioDeviceChangeNotify::kCapture,_inputDeviceID);
  779. #endif
  780. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  781. " InitMicrophone() _inputDeviceID:%d",_inputDeviceID);
  782. _bBuiltinMic = false;
  783. _bBlueMic = false;
  784. UInt32 transportType;
  785. AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyTransportType,kAudioDevicePropertyScopeInput, 0 };
  786. UInt32 size = sizeof(UInt32);
  787. OSStatus err = AudioObjectGetPropertyData(_inputDeviceID,&propertyAddress, 0, NULL, &size, &transportType);
  788. if (err == noErr) {
  789. if (transportType == 'bltn') {
  790. _bBuiltinMic = true;
  791. }
  792. if (transportType == 'blue')
  793. {
  794. _bBlueMic = true;
  795. }
  796. }
  797. return 0;
  798. }
  799. bool AudioDeviceMac::BuiltInMic()
  800. {
  801. if (MicrophoneIsInitialized()) {
  802. return _bBuiltinMic;
  803. }
  804. return false;
  805. }
  806. int32_t AudioDeviceMac::get_device_property(WebRtc_Word32 type,WebRtc_Word32 index,WebRtc_Word32 prop,void * p_data, int32_t size_of_data)
  807. {
  808. switch(prop)
  809. {
  810. case 3:
  811. {
  812. if (type == 0)
  813. {
  814. *((bool *)p_data) = _bBlueSpk;
  815. }
  816. else
  817. {
  818. *((bool *)p_data) = _bBlueMic;
  819. }
  820. }
  821. break;
  822. default:
  823. break;
  824. }
  825. return 0;
  826. }
  827. bool AudioDeviceMac::SpeakerIsInitialized() const
  828. {
  829. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  830. "%s", __FUNCTION__);
  831. return (_mixerManager.SpeakerIsInitialized());
  832. }
  833. bool AudioDeviceMac::MicrophoneIsInitialized() const
  834. {
  835. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  836. "%s", __FUNCTION__);
  837. return (_mixerManager.MicrophoneIsInitialized());
  838. }
  839. WebRtc_Word32 AudioDeviceMac::SpeakerVolumeIsAvailable(bool& available)
  840. {
  841. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  842. "%s", __FUNCTION__);
  843. available = false;
  844. bool wasInitialized = _mixerManager.SpeakerIsInitialized();
  845. // Make an attempt to open up the
  846. // output mixer corresponding to the currently selected output device.
  847. //
  848. if (!wasInitialized && InitSpeaker() == -1)
  849. {
  850. // If we end up here it means that the selected speaker has no volume
  851. // control.
  852. available = false;
  853. return 0;
  854. }
  855. _mixerManager.SpeakerVolumeIsAvailable(available);
  856. // Close the initialized output mixer
  857. //
  858. if (!wasInitialized)
  859. {
  860. _mixerManager.CloseSpeaker();
  861. }
  862. return 0;
  863. }
  864. WebRtc_Word32 AudioDeviceMac::SetSpeakerVolume(WebRtc_UWord32 volume,bool bSys)
  865. {
  866. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  867. "AudioDeviceMac::SetSpeakerVolume(volume=%u)", volume);
  868. return (_mixerManager.SetSpeakerVolume(volume,bSys));
  869. }
  870. WebRtc_Word32 AudioDeviceMac::SpeakerVolume(WebRtc_UWord32& volume,bool bSys) const
  871. {
  872. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  873. "%s", __FUNCTION__);
  874. WebRtc_UWord32 level(0);
  875. if (_mixerManager.SpeakerVolume(level,bSys) == -1)
  876. {
  877. return -1;
  878. }
  879. volume = level;
  880. return 0;
  881. }
  882. WebRtc_Word32 AudioDeviceMac::SetWaveOutVolume(WebRtc_UWord16 volumeLeft,
  883. WebRtc_UWord16 volumeRight)
  884. {
  885. WEBRTC_TRACE(
  886. kTraceModuleCall,
  887. kTraceAudioDevice,
  888. _id,
  889. "AudioDeviceMac::SetWaveOutVolume(volumeLeft=%u, volumeRight=%u)",
  890. volumeLeft, volumeRight);
  891. WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
  892. " API call not supported on this platform");
  893. return -1;
  894. }
  895. WebRtc_Word32
  896. AudioDeviceMac::WaveOutVolume(WebRtc_UWord16& /*volumeLeft*/,
  897. WebRtc_UWord16& /*volumeRight*/) const
  898. {
  899. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  900. "%s", __FUNCTION__);
  901. WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
  902. " API call not supported on this platform");
  903. return -1;
  904. }
  905. WebRtc_Word32 AudioDeviceMac::MaxSpeakerVolume(WebRtc_UWord32& maxVolume,bool bSys) const
  906. {
  907. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  908. "%s", __FUNCTION__);
  909. WebRtc_UWord32 maxVol(0);
  910. if (_mixerManager.MaxSpeakerVolume(maxVol,bSys) == -1)
  911. {
  912. return -1;
  913. }
  914. maxVolume = maxVol;
  915. return 0;
  916. }
  917. WebRtc_Word32 AudioDeviceMac::MinSpeakerVolume(WebRtc_UWord32& minVolume) const
  918. {
  919. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  920. "%s", __FUNCTION__);
  921. WebRtc_UWord32 minVol(0);
  922. if (_mixerManager.MinSpeakerVolume(minVol) == -1)
  923. {
  924. return -1;
  925. }
  926. minVolume = minVol;
  927. return 0;
  928. }
  929. WebRtc_Word32
  930. AudioDeviceMac::SpeakerVolumeStepSize(WebRtc_UWord16& stepSize) const
  931. {
  932. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  933. "%s", __FUNCTION__);
  934. WebRtc_UWord16 delta(0);
  935. if (_mixerManager.SpeakerVolumeStepSize(delta) == -1)
  936. {
  937. return -1;
  938. }
  939. stepSize = delta;
  940. return 0;
  941. }
  942. WebRtc_Word32 AudioDeviceMac::SpeakerMuteIsAvailable(bool& available)
  943. {
  944. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  945. "%s", __FUNCTION__);
  946. bool isAvailable(false);
  947. bool wasInitialized = _mixerManager.SpeakerIsInitialized();
  948. // Make an attempt to open up the
  949. // output mixer corresponding to the currently selected output device.
  950. //
  951. if (!wasInitialized && InitSpeaker() == -1)
  952. {
  953. // If we end up here it means that the selected speaker has no volume
  954. // control, hence it is safe to state that there is no mute control
  955. // already at this stage.
  956. available = false;
  957. return 0;
  958. }
  959. // Check if the selected speaker has a mute control
  960. //
  961. _mixerManager.SpeakerMuteIsAvailable(isAvailable);
  962. available = isAvailable;
  963. // Close the initialized output mixer
  964. //
  965. if (!wasInitialized)
  966. {
  967. _mixerManager.CloseSpeaker();
  968. }
  969. return 0;
  970. }
  971. WebRtc_Word32 AudioDeviceMac::SetSpeakerMute(bool enable,bool bDefault)
  972. {
  973. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  974. "AudioDeviceMac::SetSpeakerMute(enable=%u)", enable);
  975. return (_mixerManager.SetSpeakerMute(enable));
  976. }
  977. WebRtc_Word32 AudioDeviceMac::SpeakerMute(bool& enabled,bool bDefault) const
  978. {
  979. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  980. "%s", __FUNCTION__);
  981. bool muted(0);
  982. if (_mixerManager.SpeakerMute(muted) == -1)
  983. {
  984. return -1;
  985. }
  986. enabled = muted;
  987. return 0;
  988. }
  989. WebRtc_Word32 AudioDeviceMac::MicrophoneMuteIsAvailable(bool& available)
  990. {
  991. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  992. "%s", __FUNCTION__);
  993. bool isAvailable(false);
  994. bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
  995. // Make an attempt to open up the
  996. // input mixer corresponding to the currently selected input device.
  997. //
  998. if (!wasInitialized && InitMicrophone() == -1)
  999. {
  1000. // If we end up here it means that the selected microphone has no volume
  1001. // control, hence it is safe to state that there is no boost control
  1002. // already at this stage.
  1003. available = false;
  1004. return 0;
  1005. }
  1006. // Check if the selected microphone has a mute control
  1007. //
  1008. _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
  1009. available = isAvailable;
  1010. // Close the initialized input mixer
  1011. //
  1012. if (!wasInitialized)
  1013. {
  1014. _mixerManager.CloseMicrophone();
  1015. }
  1016. return 0;
  1017. }
  1018. WebRtc_Word32 AudioDeviceMac::SetMicrophoneMute(bool enable)
  1019. {
  1020. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1021. "AudioDeviceWindowsWave::SetMicrophoneMute(enable=%u)", enable);
  1022. return (_mixerManager.SetMicrophoneMute(enable));
  1023. }
  1024. WebRtc_Word32 AudioDeviceMac::MicrophoneMute(bool& enabled) const
  1025. {
  1026. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1027. "%s", __FUNCTION__);
  1028. bool muted(0);
  1029. if (_mixerManager.MicrophoneMute(muted) == -1)
  1030. {
  1031. return -1;
  1032. }
  1033. enabled = muted;
  1034. return 0;
  1035. }
  1036. WebRtc_Word32 AudioDeviceMac::MicrophoneBoostIsAvailable(bool& available)
  1037. {
  1038. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1039. "%s", __FUNCTION__);
  1040. bool isAvailable(false);
  1041. bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
  1042. // Enumerate all avaliable microphone and make an attempt to open up the
  1043. // input mixer corresponding to the currently selected input device.
  1044. //
  1045. if (!wasInitialized && InitMicrophone() == -1)
  1046. {
  1047. // If we end up here it means that the selected microphone has no volume
  1048. // control, hence it is safe to state that there is no boost control
  1049. // already at this stage.
  1050. available = false;
  1051. return 0;
  1052. }
  1053. // Check if the selected microphone has a boost control
  1054. //
  1055. _mixerManager.MicrophoneBoostIsAvailable(isAvailable);
  1056. available = isAvailable;
  1057. // Close the initialized input mixer
  1058. //
  1059. if (!wasInitialized)
  1060. {
  1061. _mixerManager.CloseMicrophone();
  1062. }
  1063. return 0;
  1064. }
  1065. WebRtc_Word32 AudioDeviceMac::SetMicrophoneBoost(bool enable)
  1066. {
  1067. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1068. "AudioDeviceMac::SetMicrophoneBoost(enable=%u)", enable);
  1069. return (_mixerManager.SetMicrophoneBoost(enable));
  1070. }
  1071. WebRtc_Word32 AudioDeviceMac::MicrophoneBoost(bool& enabled)
  1072. {
  1073. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1074. "%s", __FUNCTION__);
  1075. bool onOff(0);
  1076. if (_mixerManager.MicrophoneBoost(onOff) == -1)
  1077. {
  1078. return -1;
  1079. }
  1080. enabled = onOff;
  1081. return 0;
  1082. }
  1083. WebRtc_Word32 AudioDeviceMac::StereoRecordingIsAvailable(bool& available)
  1084. {
  1085. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1086. "%s", __FUNCTION__);
  1087. bool isAvailable(false);
  1088. bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
  1089. if (!wasInitialized && InitMicrophone() == -1)
  1090. {
  1091. // Cannot open the specified device
  1092. available = false;
  1093. return 0;
  1094. }
  1095. // Check if the selected microphone can record stereo
  1096. //
  1097. _mixerManager.StereoRecordingIsAvailable(isAvailable);
  1098. available = isAvailable;
  1099. // Close the initialized input mixer
  1100. //
  1101. if (!wasInitialized)
  1102. {
  1103. _mixerManager.CloseMicrophone();
  1104. }
  1105. return 0;
  1106. }
  1107. WebRtc_Word32 AudioDeviceMac::SetStereoRecording(bool enable)
  1108. {
  1109. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1110. "AudioDeviceMac::SetStereoRecording(enable=%u)", enable);
  1111. if (enable)
  1112. _recChannels = 2;
  1113. else
  1114. _recChannels = 1;
  1115. return 0;
  1116. }
  1117. WebRtc_Word32 AudioDeviceMac::StereoRecording(bool& enabled) const
  1118. {
  1119. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1120. "%s", __FUNCTION__);
  1121. if (_recChannels == 2)
  1122. enabled = true;
  1123. else
  1124. enabled = false;
  1125. return 0;
  1126. }
// Determines whether the currently selected output device supports stereo
// playout. Temporarily opens the output mixer if it is not already open.
// Always returns 0; the answer goes into |available|.
WebRtc_Word32 AudioDeviceMac::StereoPlayoutIsAvailable(bool& available)
{
    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
                 "%s", __FUNCTION__);
    bool isAvailable(false);
    bool wasInitialized = _mixerManager.SpeakerIsInitialized();
    if (!wasInitialized && InitSpeaker() == -1)
    {
        // Cannot open the specified device
        available = false;
        return 0;
    }
    // Check if the selected speaker supports stereo playout.
    // (The previous comment said "microphone can record stereo" — a
    // copy/paste from StereoRecordingIsAvailable.)
    _mixerManager.StereoPlayoutIsAvailable(isAvailable);
    available = isAvailable;
    // Close the output mixer, but only if this call opened it.
    if (!wasInitialized)
    {
        _mixerManager.CloseSpeaker();
    }
    return 0;
}
  1151. WebRtc_Word32 AudioDeviceMac::SetStereoPlayout(bool enable)
  1152. {
  1153. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1154. "AudioDeviceMac::SetStereoPlayout(enable=%u)", enable);
  1155. if (enable)
  1156. _playChannels = 2;
  1157. else
  1158. _playChannels = 1;
  1159. return 0;
  1160. }
  1161. WebRtc_Word32 AudioDeviceMac::StereoPlayout(bool& enabled) const
  1162. {
  1163. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1164. "%s", __FUNCTION__);
  1165. if (_playChannels == 2)
  1166. enabled = true;
  1167. else
  1168. enabled = false;
  1169. return 0;
  1170. }
  1171. WebRtc_Word32 AudioDeviceMac::SetAGC(bool enable)
  1172. {
  1173. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1174. "AudioDeviceMac::SetAGC(enable=%d)", enable);
  1175. _AGC = enable;
  1176. return 0;
  1177. }
  1178. bool AudioDeviceMac::AGC() const
  1179. {
  1180. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1181. "%s", __FUNCTION__);
  1182. return _AGC;
  1183. }
  1184. WebRtc_Word32 AudioDeviceMac::MicrophoneVolumeIsAvailable(bool& available)
  1185. {
  1186. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1187. "%s", __FUNCTION__);
  1188. available = false;
  1189. bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
  1190. // Make an attempt to open up the
  1191. // input mixer corresponding to the currently selected output device.
  1192. //
  1193. if (!wasInitialized && InitMicrophone() == -1)
  1194. {
  1195. // If we end up here it means that the selected microphone has no volume
  1196. // control.
  1197. available = false;
  1198. return 0;
  1199. }
  1200. _mixerManager.MicrophoneVolumeIsAvailable(available);
  1201. // Close the initialized input mixer
  1202. //
  1203. if (!wasInitialized)
  1204. {
  1205. _mixerManager.CloseMicrophone();
  1206. }
  1207. return 0;
  1208. }
  1209. WebRtc_Word32 AudioDeviceMac::SetMicrophoneVolume(WebRtc_UWord32 volume)
  1210. {
  1211. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1212. "AudioDeviceMac::SetMicrophoneVolume(volume=%u)", volume);
  1213. return (_mixerManager.SetMicrophoneVolume(volume));
  1214. }
  1215. WebRtc_Word32 AudioDeviceMac::MicrophoneVolume(WebRtc_UWord32& volume) const
  1216. {
  1217. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1218. "%s", __FUNCTION__);
  1219. WebRtc_UWord32 level(0);
  1220. if (_mixerManager.MicrophoneVolume(level) == -1)
  1221. {
  1222. WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
  1223. " failed to retrive current microphone level");
  1224. return -1;
  1225. }
  1226. volume = level;
  1227. return 0;
  1228. }
  1229. WebRtc_Word32
  1230. AudioDeviceMac::MaxMicrophoneVolume(WebRtc_UWord32& maxVolume) const
  1231. {
  1232. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1233. "%s", __FUNCTION__);
  1234. WebRtc_UWord32 maxVol(0);
  1235. if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1)
  1236. {
  1237. return -1;
  1238. }
  1239. maxVolume = maxVol;
  1240. return 0;
  1241. }
  1242. WebRtc_Word32
  1243. AudioDeviceMac::MinMicrophoneVolume(WebRtc_UWord32& minVolume) const
  1244. {
  1245. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1246. "%s", __FUNCTION__);
  1247. WebRtc_UWord32 minVol(0);
  1248. if (_mixerManager.MinMicrophoneVolume(minVol) == -1)
  1249. {
  1250. return -1;
  1251. }
  1252. minVolume = minVol;
  1253. return 0;
  1254. }
  1255. WebRtc_Word32
  1256. AudioDeviceMac::MicrophoneVolumeStepSize(WebRtc_UWord16& stepSize) const
  1257. {
  1258. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1259. "%s", __FUNCTION__);
  1260. WebRtc_UWord16 delta(0);
  1261. if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1)
  1262. {
  1263. return -1;
  1264. }
  1265. stepSize = delta;
  1266. return 0;
  1267. }
  1268. WebRtc_Word16 AudioDeviceMac::PlayoutDevices()
  1269. {
  1270. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1271. "%s", __FUNCTION__);
  1272. AudioDeviceID playDevices[MaxNumberDevices];
  1273. WebRtc_UWord8 ZoomAudioDeviceNum = 0;
  1274. WebRtc_Word32 nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput, playDevices,
  1275. MaxNumberDevices,ZoomAudioDeviceNum);
  1276. if (nDevices > 0)
  1277. {
  1278. nDevices -= ZoomAudioDeviceNum;
  1279. }
  1280. return nDevices;
  1281. }
  1282. WebRtc_Word32 AudioDeviceMac::SetPlayoutDevice(WebRtc_UWord16 index)
  1283. {
  1284. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1285. "AudioDeviceMac::SetPlayoutDevice(index=%u)", index);
  1286. if (_playIsInitialized)
  1287. {
  1288. return -1;
  1289. }
  1290. AudioDeviceID playDevices[MaxNumberDevices];
  1291. WebRtc_UWord8 ZoomAudioDeviceNum = 0;
  1292. WebRtc_Word32 nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput,
  1293. playDevices, MaxNumberDevices,
  1294. ZoomAudioDeviceNum);
  1295. if (nDevices > 0)
  1296. {
  1297. nDevices -= ZoomAudioDeviceNum;
  1298. }
  1299. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  1300. " number of availiable waveform-audio output devices is %u",
  1301. nDevices);
  1302. if (index == (WebRtc_UWord16)-1)
  1303. {
  1304. }
  1305. else if (index > (nDevices - 1))
  1306. {
  1307. WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
  1308. " device index is out of range [0,%u]", (nDevices - 1));
  1309. return -1;
  1310. }
  1311. _outputDeviceIndex = index;
  1312. _outputDeviceIsSpecified = true;
  1313. return 0;
  1314. }
  1315. WebRtc_Word32 AudioDeviceMac::SetPlayoutDevice(
  1316. AudioDeviceModule::WindowsDeviceType device)
  1317. {
  1318. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  1319. "WindowsDeviceType not supported");
  1320. if (device == AudioDeviceModule::kDefaultCommunicationDevice)
  1321. {
  1322. if(0 == SetPlayoutDevice(0))
  1323. {
  1324. return 0;
  1325. }
  1326. else
  1327. {
  1328. return -1;
  1329. }
  1330. }
  1331. else if (device == AudioDeviceModule::kDefaultDevice)
  1332. {
  1333. if(0 == SetPlayoutDevice((WebRtc_UWord16)-1))
  1334. {
  1335. return 0;
  1336. }
  1337. else
  1338. {
  1339. return -1;
  1340. }
  1341. }
  1342. return 0;
  1343. }
  1344. WebRtc_Word32 AudioDeviceMac::GetPlayoutDevice(int &index)
  1345. {
  1346. if (_outputDeviceIsSpecified)
  1347. {
  1348. index = _outputDeviceIndex;
  1349. return 0;
  1350. }
  1351. return -1;
  1352. }
// Returns the name/GUID of a playout device. |index| supports sentinels:
//   (WebRtc_UWord16)-3 : the device currently used for playout (cached
//                        name/guid; cleared when not playing)
//   (WebRtc_UWord16)-1 : treated as list index 0
//   (WebRtc_UWord16)-2 : the system default device, queried directly
// Any other value selects an entry from the re-listed (filtered) device set.
WebRtc_Word32 AudioDeviceMac::PlayoutDeviceName(
    WebRtc_UWord16 index,
    WebRtc_Word8 name[kAdmMaxDeviceNameSize],
    WebRtc_Word8 guid[kAdmMaxGuidSize])
{
    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
                 "AudioDeviceMac::PlayoutDeviceName(index=%u)", index);
    if (index == (WebRtc_UWord16)-3)
    {
        // Report the cached name/guid of the active playout device.
        if (name != NULL)
        {
            memcpy(name, _outputDevName, kAdmMaxDeviceNameSize);
            if (!_playing)
            {
                // No active playout: report an empty name.
                memset(name, 0, kAdmMaxDeviceNameSize);
            }
        }
        if (guid != NULL)
        {
            memcpy(guid, _outputDevGuid, kAdmMaxGuidSize);
            if (!_playing)
            {
                memset(guid, 0, kAdmMaxGuidSize);
            }
            /// mac change id when usb device plug out and in. so here use name
            /*
            memset(guid, 0, kAdmMaxGuidSize);
            if (_playing)
            {
                sprintf(guid, "%d", (int)_outputDeviceID);
            }
            */
        }
        return 0;
    }
    if (index == (WebRtc_UWord16)-1)
    {
        // Default device: fall through using list index 0.
        index = 0;
    }
    else if (index == (WebRtc_UWord16)-2)
    {
        memset(name, 0, kAdmMaxDeviceNameSize);
        if (guid != NULL)
        {
            memset(guid, 0, kAdmMaxGuidSize);
        }
        // Index -1 asks GetDeviceName for the system default output device.
        return GetDeviceName(kAudioDevicePropertyScopeOutput, -1, name, guid);
    }
    memset(name, 0, kAdmMaxDeviceNameSize);
    if (guid != NULL)
    {
        memset(guid, 0, kAdmMaxGuidSize);
    }
#if 0
    // Disabled: old path that resolved the raw device list directly.
    const WebRtc_UWord16 nDevices(PlayoutDevices());
    if ((index > (nDevices - 1)) || (name == NULL))
    {
        return -1;
    }
    CheckAndIncreaseZoomDevice(kAudioDevicePropertyScopeOutput,nDevices,index);
    return GetDeviceName(kAudioDevicePropertyScopeOutput, index, name, guid);
#else
    // Active path: resolve from the filtered (Zoom devices removed) list.
    return GetReListDeviceName(kAudioDevicePropertyScopeOutput, index, name, guid);
#endif
}
// Enumerates the audio devices for |scope| and fills the caller-provided
// arrays with, per device: a display name (deduplicated with a "#<n>"
// suffix and extended with the active data-source name, e.g.
// " (Internal Microphone)"), a "guid" string, and the AudioDeviceID.
// Zoom's own virtual devices (and, in MIMO builds, Blackmagic audio) are
// filtered out. |number| is in/out: array capacity in, entries written out.
// Always returns 0.
WebRtc_Word32 AudioDeviceMac::AllRelistDevice(const AudioObjectPropertyScope scope,WebRtc_Word8 name[][kAdmMaxDeviceNameSize],
    WebRtc_Word8 guid[][kAdmMaxGuidSize],AudioDeviceID scopedDeviceIds[],int& number)
{
    int outSize = number; // capacity of the caller-provided arrays
    AudioDeviceID deviceIds[MaxNumberDevices];
    char deviceNames[MaxNumberDevices][kAdmMaxDeviceNameSize];
    WebRtc_UWord8 ZoomAudioDeviceNum = 0;
    int numberDevices = GetNumberDevices(scope, deviceIds,MaxNumberDevices,ZoomAudioDeviceNum,false);
    WebRtc_Word32 index1 = 0,index2 = 0;// index1 is to read from system, and index2 is write to name.
    OSStatus err = noErr;
    UInt32 len = kAdmMaxDeviceNameSize;
    AudioObjectPropertyAddress propertyAddress = {
        kAudioDevicePropertyDeviceName, scope, 0 };
    // First pass: fetch the raw device name of every enumerated device.
    for (int i = 0; i < numberDevices; i++)
    {
        UInt32 len = kAdmMaxDeviceNameSize;
        AudioObjectGetPropertyData( deviceIds[i],
            &propertyAddress, 0, NULL, &len, deviceNames[i]);
    }
    for (index1 = 0,index2 = 0; index1 < numberDevices && index2 < outSize;index1++)
    {
        char devName[128];
        char devUniqueID[128];
        memset(devName, 0, sizeof(devName));
        memset(devUniqueID, 0, sizeof(devUniqueID));
        AudioDeviceID usedID = deviceIds[index1];
        // Transport type distinguishes built-in devices ('bltn') below.
        UInt32 transportType = 0;
        AudioObjectPropertyAddress propertyAddressTransportType = { kAudioDevicePropertyTransportType,scope, 0 };
        UInt32 size = sizeof(UInt32);
        OSStatus err = AudioObjectGetPropertyData(usedID,&propertyAddressTransportType, 0, NULL, &size, &transportType);
        strncpy(devName,deviceNames[index1],sizeof(devName));
        // Count earlier devices with the same raw name so duplicates can be
        // disambiguated with a "#<n>" suffix.
        uint16_t sameDeviceNameCount = 0;
        for (int i = 0; i < index1; i++)
        {
            if (strcmp(devName,deviceNames[i]) == 0)
            {
                sameDeviceNameCount++;
            }
        }
        if (sameDeviceNameCount != 0)
        {
            char sourceName[128];
            memset(sourceName,0,128);
            sprintf(sourceName,"#%d",sameDeviceNameCount);
            if ((strlen(devName) + strlen(sourceName)) < kAdmMaxDeviceNameSize)
            {
                strcat(devName,sourceName);
            }
        }
        // The "unique id" is the (deduplicated) name, without the
        // data-source suffix appended below.
        strncpy(devUniqueID,devName,sizeof(devUniqueID));
        // If the device exposes a data source (e.g. "Internal Microphone"),
        // append its localized name to the display name.
        propertyAddress.mSelector = kAudioDevicePropertyDataSource;
        Boolean hasProperty = AudioObjectHasProperty(usedID,
            &propertyAddress);
        if(hasProperty)
        {
            UInt32 dataSource = 0;
            UInt32 size = sizeof(dataSource);
            if(noErr == AudioObjectGetPropertyData(usedID,
                &propertyAddress, 0, NULL, &size, &dataSource))
            {
                // Translate the data-source ID into a CFString name.
                AudioValueTranslation trans;
                CFStringRef str = NULL;
                Boolean ok;
                trans.mInputData = &dataSource;
                trans.mInputDataSize = sizeof(UInt32);
                trans.mOutputData = &str;
                trans.mOutputDataSize = sizeof(CFStringRef);
                propertyAddress.mSelector = kAudioDevicePropertyDataSourceNameForIDCFString;
                size = sizeof(AudioValueTranslation);
                if(AudioObjectGetPropertyData(usedID,
                    &propertyAddress,
                    0,
                    NULL,
                    &size,
                    &trans)==noErr)
                {
                    char sourceName[128];
                    if(str != NULL && CFStringGetCString(str, sourceName, 128, kCFStringEncodingUTF8))
                    {
                        // +3 accounts for " (", ")".
                        if ((strlen(devName) + strlen(sourceName) + 3) < kAdmMaxDeviceNameSize)
                        {
                            strcat(devName, " (");
                            strcat(devName, sourceName);
                            strcat(devName, ")");
                        }
                    }
                }
                if(str)
                    CFRelease(str);
            }
        }
        devName[kAdmMaxDeviceNameSize - 1] = '\0';
        // Filter out Zoom's own virtual devices (and Blackmagic in MIMO
        // builds) so they never appear in user-facing lists.
#ifdef BUILD_FOR_MIMO
        if ((strstr(devName, ZoomAudioDeviceName2) == 0) && (strstr(devName,BlackmagicAudioName) == 0)/* && (strstr(devName,MagewellAudioName) == 0)*/
            && (strncmp(devName,ZoomAudioDeviceName,strlen(ZoomAudioDeviceName)) != 0))
#else
        if (strstr(devName, ZoomAudioDeviceName2) == 0 && (strncmp(devName,ZoomAudioDeviceName,strlen(ZoomAudioDeviceName)) != 0))
#endif
        {
            strncpy(name[index2],devName,kAdmMaxDeviceNameSize);
            // Built-in devices keep the id stable across USB replugging by
            // using the name without the data-source suffix.
            // NOTE(review): both strncpy calls below copy up to
            // kAdmMaxDeviceNameSize bytes into a kAdmMaxGuidSize buffer —
            // confirm kAdmMaxGuidSize >= kAdmMaxDeviceNameSize.
            if (transportType == 'bltn')
            {
                strncpy(guid[index2],devUniqueID,kAdmMaxDeviceNameSize);
            }
            else
            {
                strncpy(guid[index2],devName,kAdmMaxDeviceNameSize);
            }
            scopedDeviceIds[index2] = usedID;
            index2++;
        }
    }
    number = index2;
    return 0;
}
  1533. WebRtc_Word32 AudioDeviceMac::GetReListDeviceName(const AudioObjectPropertyScope scope,
  1534. WebRtc_UWord16 index, char* name, char* deviceID)
  1535. {
  1536. char deviceIds1[MaxNumberDevices][kAdmMaxDeviceNameSize];
  1537. char deviceNames1[MaxNumberDevices][kAdmMaxDeviceNameSize];
  1538. AudioDeviceID scopedDeviceIds[MaxNumberDevices];
  1539. int deviceNum = MaxNumberDevices;
  1540. AllRelistDevice(scope, deviceNames1, deviceIds1, scopedDeviceIds, deviceNum);
  1541. if(index < deviceNum)
  1542. {
  1543. strncpy((char*)(name),deviceNames1[index],kAdmMaxDeviceNameSize);
  1544. strncpy((char*)(deviceID),deviceIds1[index],kAdmMaxDeviceNameSize);
  1545. return 0;
  1546. }
  1547. else
  1548. {
  1549. return -1;
  1550. }
  1551. }
// Enumerates the audio devices for |scope| and writes name/unique-id pairs
// into the packed SSB_MC_DATA_BLOCK_AUDIO_DEVICE_DES descriptor array that
// |name| points to (at most |number| entries). Mirrors AllRelistDevice():
// same deduplication, data-source suffixing and Zoom/Blackmagic filtering,
// but reports into the caller's descriptor structs instead of raw arrays.
// Returns -1 on a null descriptor pointer, otherwise 0.
WebRtc_Word32 AudioDeviceMac::AllDeviceName(const AudioObjectPropertyScope scope,
    char* name,int number)
{
    SSB_MC_DATA_BLOCK_AUDIO_DEVICE_DES *p_des = (SSB_MC_DATA_BLOCK_AUDIO_DEVICE_DES *)name;
    if (!p_des)
    {
        return -1;
    }
    AudioDeviceID deviceIds[MaxNumberDevices];
    char deviceNames[MaxNumberDevices][kAdmMaxDeviceNameSize];
    WebRtc_UWord8 ZoomAudioDeviceNum = 0;
    int numberDevices = GetNumberDevices(scope, deviceIds,MaxNumberDevices,ZoomAudioDeviceNum,false);
    WebRtc_Word32 index = 0,index2 = 0;// index is to read from system, and index2 is write to name.
    OSStatus err = noErr;
    UInt32 len = kAdmMaxDeviceNameSize;
    AudioObjectPropertyAddress propertyAddress = {
        kAudioDevicePropertyDeviceName, scope, 0 };
    // First pass: fetch the raw device name of every enumerated device.
    for (int i = 0; i < numberDevices; i++)
    {
        UInt32 len = kAdmMaxDeviceNameSize;
        AudioObjectGetPropertyData( deviceIds[i],
            &propertyAddress, 0, NULL, &len, deviceNames[i]);
    }
    for (index = 0,index2 = 0; index < numberDevices && index2 < number;index++)
    {
        char devName[128];
        char devUniqueID[128];
        memset(devName, 0, sizeof(devName));
        memset(devUniqueID, 0, sizeof(devUniqueID));
        AudioDeviceID usedID = deviceIds[index];
        // Transport type distinguishes built-in devices ('bltn') below.
        UInt32 transportType = 0;
        AudioObjectPropertyAddress propertyAddressTransportType = { kAudioDevicePropertyTransportType,scope, 0 };
        UInt32 size = sizeof(UInt32);
        OSStatus err = AudioObjectGetPropertyData(usedID,&propertyAddressTransportType, 0, NULL, &size, &transportType);
        strncpy(devName,deviceNames[index],sizeof(devName));
        // Count earlier devices with the same raw name so duplicates can be
        // disambiguated with a "#<n>" suffix.
        uint16_t sameDeviceNameCount = 0;
        for (int i = 0; i < index; i++)
        {
            if (strcmp(devName,deviceNames[i]) == 0)
            {
                sameDeviceNameCount++;
            }
        }
        if (sameDeviceNameCount != 0)
        {
            char sourceName[128];
            memset(sourceName,0,128);
            sprintf(sourceName,"#%d",sameDeviceNameCount);
            if ((strlen(devName) + strlen(sourceName)) < kAdmMaxDeviceNameSize)
            {
                strcat(devName,sourceName);
            }
        }
        // The unique id is the (deduplicated) name, without the data-source
        // suffix appended below.
        strncpy(devUniqueID,devName,sizeof(devUniqueID));
        // If the device exposes a data source (e.g. "Internal Microphone"),
        // append its localized name to the display name.
        propertyAddress.mSelector = kAudioDevicePropertyDataSource;
        Boolean hasProperty = AudioObjectHasProperty(usedID,
            &propertyAddress);
        if(hasProperty)
        {
            UInt32 dataSource = 0;
            UInt32 size = sizeof(dataSource);
            if(noErr == AudioObjectGetPropertyData(usedID,
                &propertyAddress, 0, NULL, &size, &dataSource))
            {
                // Translate the data-source ID into a CFString name.
                AudioValueTranslation trans;
                CFStringRef str = NULL;
                Boolean ok;
                trans.mInputData = &dataSource;
                trans.mInputDataSize = sizeof(UInt32);
                trans.mOutputData = &str;
                trans.mOutputDataSize = sizeof(CFStringRef);
                propertyAddress.mSelector = kAudioDevicePropertyDataSourceNameForIDCFString;
                size = sizeof(AudioValueTranslation);
                if(AudioObjectGetPropertyData(usedID,
                    &propertyAddress,
                    0,
                    NULL,
                    &size,
                    &trans)==noErr)
                {
                    char sourceName[128];
                    if(str != NULL && CFStringGetCString(str, sourceName, 128, kCFStringEncodingUTF8))
                    {
                        // +3 accounts for " (", ")".
                        if ((strlen(devName) + strlen(sourceName) + 3) < kAdmMaxDeviceNameSize)
                        {
                            strcat(devName, " (");
                            strcat(devName, sourceName);
                            strcat(devName, ")");
                        }
                    }
                }
                if(str)
                    CFRelease(str);
            }
        }
        devName[kAdmMaxDeviceNameSize - 1] = '\0';
        /// mac change id when usb device plug out and in. so here use name
        /*
        if( deviceID )
        {
            sprintf(deviceID, "%d", (int)usedID);
        }
        */
        // Filter out Zoom's own virtual devices (and Blackmagic in MIMO
        // builds) so they never appear in user-facing lists.
#ifdef BUILD_FOR_MIMO
        if ((strstr(devName, ZoomAudioDeviceName2) == 0) && (strstr(devName,BlackmagicAudioName) == 0)/* && (strstr(devName,MagewellAudioName) == 0)*/
            && (strncmp(devName,ZoomAudioDeviceName,strlen(ZoomAudioDeviceName)) != 0))
#else
        if (strstr(devName, ZoomAudioDeviceName2) == 0 && (strncmp(devName,ZoomAudioDeviceName,strlen(ZoomAudioDeviceName)) != 0))
#endif
        {
            strncpy((char *)p_des[index2].p_name,devName,p_des[index2].len_of_name);
            // Built-in devices report the stable name-based unique id.
            if (transportType == 'bltn')
            {
                strncpy((char *)p_des[index2].p_unique_id,devUniqueID,p_des[index2].len_of_unique_id);
            }
            else
            {
                strncpy((char *)p_des[index2].p_unique_id,devName,p_des[index2].len_of_unique_id);
            }
            index2++;
        }
    }
    return 0;
}
  1676. WebRtc_Word32 AudioDeviceMac::AllPlayoutDeviceName(char *strNameUTF8,int number)
  1677. {
  1678. return AllDeviceName(kAudioDevicePropertyScopeOutput,strNameUTF8,number);
  1679. }
// Returns the name/GUID of a recording device. |index| supports sentinels:
//   (WebRtc_UWord16)-3 : the device currently used for recording (cached
//                        name/guid; cleared when not recording)
//   (WebRtc_UWord16)-1 : treated as list index 0
//   (WebRtc_UWord16)-2 : the system default device, queried directly
// Any other value selects an entry from the re-listed (filtered) device set.
WebRtc_Word32 AudioDeviceMac::RecordingDeviceName(
    WebRtc_UWord16 index,
    WebRtc_Word8 name[kAdmMaxDeviceNameSize],
    WebRtc_Word8 guid[kAdmMaxGuidSize])
{
    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
                 "AudioDeviceMac::RecordingDeviceName(index=%u)", index);
    if (index == (WebRtc_UWord16)-3)
    {
        // Report the cached name/guid of the active recording device.
        if (name != NULL)
        {
            memcpy(name, _inputDevName, kAdmMaxDeviceNameSize);
            if (!_recording)
            {
                // No active recording: report an empty name.
                memset(name, 0, kAdmMaxDeviceNameSize);
            }
        }
        if (guid != NULL)
        {
            memcpy(guid, _inputDevGuid, kAdmMaxGuidSize);
            if (!_recording)
            {
                memset(guid, 0, kAdmMaxGuidSize);
            }
            /// mac change id when usb device plug out and in. so here use name
            /*
            memset(guid, 0, kAdmMaxGuidSize);
            if (_recording)
            {
                sprintf(guid, "%d", (int)_inputDeviceID);
            }
            */
        }
        return 0;
    }
    if (index == (WebRtc_UWord16)-1)
    {
        // Default device: fall through using list index 0.
        index = 0;
    }
    else if (index == (WebRtc_UWord16)-2)
    {
        memset(name, 0, kAdmMaxDeviceNameSize);
        if (guid != NULL)
        {
            memset(guid, 0, kAdmMaxGuidSize);
        }
        // Index -1 asks GetDeviceName for the system default input device.
        return GetDeviceName(kAudioDevicePropertyScopeInput, -1, name, guid);
    }
    memset(name, 0, kAdmMaxDeviceNameSize);
    if (guid != NULL)
    {
        memset(guid, 0, kAdmMaxGuidSize);
    }
#if 0
    // Disabled: old path that resolved the raw device list directly.
    const WebRtc_UWord16 nDevices(RecordingDevices());
    if ((index > (nDevices - 1)) || (name == NULL))
    {
        return -1;
    }
    CheckAndIncreaseZoomDevice(kAudioDevicePropertyScopeInput, nDevices,index);
    return GetDeviceName(kAudioDevicePropertyScopeInput, index, name, guid);
#else
    // Active path: resolve from the filtered (Zoom devices removed) list.
    return GetReListDeviceName(kAudioDevicePropertyScopeInput, index, name, guid);
#endif
}
  1745. WebRtc_Word32 AudioDeviceMac::AllRecordingDeviceName(
  1746. char *strNameUTF8,int number)
  1747. {
  1748. return AllDeviceName(kAudioDevicePropertyScopeInput,strNameUTF8,number);
  1749. }
  1750. WebRtc_Word32 AudioDeviceMac::AllRecordingDevice(
  1751. WebRtc_Word8 name[][kAdmMaxDeviceNameSize],
  1752. WebRtc_Word8 guid[][kAdmMaxGuidSize],int& number)
  1753. {
  1754. AudioDeviceID scopedDeviceIds[MaxNumberDevices];
  1755. return AllRelistDevice(kAudioDevicePropertyScopeInput, name, guid, scopedDeviceIds, number);
  1756. }
  1757. WebRtc_Word16 AudioDeviceMac::RecordingDevices()
  1758. {
  1759. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1760. "%s", __FUNCTION__);
  1761. AudioDeviceID recDevices[MaxNumberDevices];
  1762. WebRtc_UWord8 ZoomAudioDeviceNum = 0;
  1763. WebRtc_Word32 nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput, recDevices,
  1764. MaxNumberDevices,ZoomAudioDeviceNum);
  1765. if(nDevices > 0)
  1766. {
  1767. nDevices -= ZoomAudioDeviceNum;
  1768. }
  1769. return nDevices;
  1770. }
// Selects the recording device by list index. Sentinels:
//   (WebRtc_UWord16)-1 : system default device (no range check)
//   (WebRtc_UWord16)-4 : select the built-in microphone (ultrasound path)
// Fails if recording is already initialized or the index is out of range.
// When |use_exclusive_mode| is set, the microphone sample rate is adjusted
// for the device mode.
WebRtc_Word32 AudioDeviceMac::SetRecordingDevice(WebRtc_UWord16 index,bool use_exclusive_mode)
{
    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
                 "AudioDeviceMac::SetRecordingDevice(index=%u)", index);
    // The device cannot be switched while recording is initialized.
    if (_recIsInitialized)
    {
        return -1;
    }
    AudioDeviceID recDevices[MaxNumberDevices];
    WebRtc_UWord8 ZoomAudioDeviceNum = 0;
    WebRtc_Word32 nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput,
                                              recDevices, MaxNumberDevices,ZoomAudioDeviceNum);
    if(nDevices > 0)
    {
        // Zoom's virtual devices are hidden from the selectable count.
        nDevices -= ZoomAudioDeviceNum;
    }
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 " number of availiable waveform-audio input devices is %u",
                 nDevices);
    if (index == (WebRtc_UWord16)-1)
    {
        // Default device: accepted as-is and stored below.
    }
    else if(index == (WebRtc_UWord16)-4) /// for ultrasound device
    {
        if (nDevices > 0)
        {
            // Re-filter the full list (count restored by adding back the
            // Zoom device count subtracted above).
            nDevices = CheckAndRemoveZoomDevice(kAudioDevicePropertyScopeInput, recDevices, nDevices + ZoomAudioDeviceNum);
        }
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                     "AudioDeviceMac::SetRecordingDevice index -4 removed zoom devices: %u",
                     nDevices);
        // Scan for the first built-in ('bltn' transport) input device.
        bool bBuiltinMic = false;
        for(int i = 0; i < nDevices; i++)
        {
            UInt32 transportType ;
            AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyTransportType,kAudioDevicePropertyScopeInput, 0 };
            UInt32 size = sizeof(UInt32);
            OSStatus err = AudioObjectGetPropertyData(recDevices[i],&propertyAddress, 0, NULL, &size, &transportType);
            if (err == noErr)
            {
                if (transportType == 'bltn')
                {
                    /// here judge if plug headphone
                    _inputDeviceIndex = i;
                    _inputDeviceIsSpecified = true;
                    bBuiltinMic = true;
                    _bUseExclusiveMode = use_exclusive_mode;
                    if (_bUseExclusiveMode)
                    {
                        AdjustMicrophoneSampleRateBaseDeviceMode();
                    }
                    break;
                }
            }
        }
        if (bBuiltinMic)
        {
            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                         "AudioDeviceMac::SetRecordingDevice index -4 have built in mic index is : %u",
                         _inputDeviceIndex);
            return 0;
        }
        else
        {
            // No built-in microphone found: the ultrasound path fails.
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "AudioDeviceMac::SetRecordingDevice index -4 have not built in mic index is : %u",
                         _inputDeviceIndex);
            return -1;
        }
    }
    else if (index > (nDevices - 1))
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " device index is out of range [0,%u]", (nDevices - 1));
        return -1;
    }
    _inputDeviceIndex = index;
    // _inputDeviceIndexUI tracks the index as shown in the UI; it is left
    // untouched when selection came through the WindowsDeviceType overload.
    if (!_usingDeviceType)
    {
        _inputDeviceIndexUI = index;
    }
    _inputDeviceIsSpecified = true;
    _bUseExclusiveMode = use_exclusive_mode;
    if (_bUseExclusiveMode)
    {
        AdjustMicrophoneSampleRateBaseDeviceMode();
    }
    return 0;
}
  1860. WebRtc_Word32
  1861. AudioDeviceMac::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType device,bool use_exclusive_mode)
  1862. {
  1863. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  1864. "WindowsDeviceType not supported");
  1865. _usingDeviceType = true;
  1866. if (device == AudioDeviceModule::kDefaultCommunicationDevice)
  1867. {
  1868. if(0 == SetRecordingDevice(0))
  1869. {
  1870. _inputDeviceIndexUI = -1;
  1871. _usingDeviceType = false;
  1872. return 0;
  1873. }
  1874. else
  1875. {
  1876. _usingDeviceType = false;
  1877. return -1;
  1878. }
  1879. }
  1880. else if (device == AudioDeviceModule::kDefaultDevice)
  1881. {
  1882. if(0 == SetRecordingDevice((WebRtc_UWord16)-1))
  1883. {
  1884. _inputDeviceIndexUI = -2;
  1885. _usingDeviceType = false;
  1886. return 0;
  1887. }
  1888. else
  1889. {
  1890. _usingDeviceType = false;
  1891. return -1;
  1892. }
  1893. }
  1894. _usingDeviceType = false;
  1895. }
  1896. WebRtc_Word32 AudioDeviceMac::GetRecordingDevice(int &index)
  1897. {
  1898. if (_inputDeviceIsSpecified)
  1899. {
  1900. index = _inputDeviceIndexUI;
  1901. return 0;
  1902. }
  1903. return -1;
  1904. }
  1905. WebRtc_Word32 AudioDeviceMac::PlayoutIsAvailable(bool& available)
  1906. {
  1907. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1908. "%s", __FUNCTION__);
  1909. available = false;
  1910. // Try to initialize the playout side
  1911. WebRtc_Word32 res = InitPlayout();
  1912. // Cancel effect of initialization
  1913. StopPlayout();
  1914. if (res != -1)
  1915. {
  1916. available = true;
  1917. }
  1918. return 0;
  1919. }
  1920. WebRtc_Word32 AudioDeviceMac::RecordingIsAvailable(bool& available)
  1921. {
  1922. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  1923. "%s", __FUNCTION__);
  1924. available = false;
  1925. // Try to initialize the recording side
  1926. WebRtc_Word32 res = InitRecording();
  1927. // Cancel effect of initialization
  1928. StopRecording();
  1929. if (res != -1)
  1930. {
  1931. available = true;
  1932. }
  1933. return 0;
  1934. }
// Prepares the playout side of the selected output device:
//  - validates device state and reads the hardware stream format,
//  - creates an AudioConverter from our 16-bit PCM "desired" format to
//    the device's native format,
//  - clamps and applies the device I/O buffer size,
//  - accumulates device + stream latency into _renderLatencyUs,
//  - registers property listeners and the render IOProc.
// Returns 0 on success (or if already initialized), -1 on any error.
// Must not be called while playout is running.
WebRtc_Word32 AudioDeviceMac::InitPlayout()
{
    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
                 "%s", __FUNCTION__);
    CriticalSectionScoped lock(_critSect);
    if (_playing)
    {
        // Cannot (re)initialize while actively playing.
        return -1;
    }
    if (!_outputDeviceIsSpecified)
    {
        return -1;
    }
    if (_playIsInitialized)
    {
        // Idempotent: a second call is a no-op success.
        return 0;
    }
    // Initialize the speaker (devices might have been added or removed).
    // Failure here is logged but non-fatal.
    if (InitSpeaker() == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     " InitSpeaker() failed");
    }
    if (!MicrophoneIsInitialized())
    {
        // Make this call to check if we are using
        // one or two devices (_twoDevices)
        bool available = false;
        if (MicrophoneIsAvailable(available) == -1)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         " MicrophoneIsAvailable() failed");
        }
    }
    // Discard any stale samples left in the render ring buffer.
    PaUtil_FlushRingBuffer(_paRenderBuffer);
    OSStatus err = noErr;
    UInt32 size = 0;
    _renderDelayOffsetSamples = 0;
    _renderDelayUs = 0;
    _renderLatencyUs = 0;
    _renderDeviceIsAlive = 1;
    _doStop = false;
    // The internal microphone of a MacBook Pro is located under the left speaker
    // grille. When the internal speakers are in use, we want to fully stereo
    // pan to the right.
    AudioObjectPropertyAddress
        propertyAddress = { kAudioDevicePropertyDataSource,
                            kAudioDevicePropertyScopeOutput, 0 };
    if (_macBookPro)
    {
        _macBookProPanRight = false;
        Boolean hasProperty = AudioObjectHasProperty(_outputDeviceID,
                                                     &propertyAddress);
        if (hasProperty)
        {
            UInt32 dataSource = 0;
            size = sizeof(dataSource);
            WEBRTC_CA_LOG_WARN(AudioObjectGetPropertyData(_outputDeviceID,
                    &propertyAddress, 0, NULL, &size, &dataSource));
            // 'ispk' is the data-source code for the internal speakers.
            if (dataSource == 'ispk')
            {
                // NOTE: panning itself is intentionally disabled here;
                // the flag assignment below is commented out upstream.
                // _macBookProPanRight = true;
                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice,
                             _id,
                             "MacBook Pro using internal speakers; stereo"
                             " panning right");
            } else
            {
                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice,
                             _id, "MacBook Pro not using internal speakers");
            }
            // Add a listener to determine if the status changes.
            WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_outputDeviceID,
                    &propertyAddress, &objectListenerProc, this));
        }
    }
    // Get current stream description of the output device.
    propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
    memset(&_outStreamFormat, 0, sizeof(_outStreamFormat));
    size = sizeof(_outStreamFormat);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
            &propertyAddress, 0, NULL, &size, &_outStreamFormat));
    // Only interleaved linear PCM with a supported channel count is accepted.
    if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM)
    {
        logCAMsg(kTraceError, kTraceAudioDevice, _id,
                 "Unacceptable output stream format -> mFormatID",
                 (const char *) &_outStreamFormat.mFormatID);
        return -1;
    }
    if (_outStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "Too many channels on device -> mChannelsPerFrame = %d",
                     _outStreamFormat.mChannelsPerFrame);
        return -1;
    }
    if (_outStreamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "Non-interleaved audio data is not supported.",
                     "AudioHardware streams should not have this format.");
        return -1;
    }
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "Ouput stream format:");
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "mSampleRate = %f, mChannelsPerFrame = %u",
                 _outStreamFormat.mSampleRate,
                 _outStreamFormat.mChannelsPerFrame);
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "mBytesPerPacket = %u, mFramesPerPacket = %u",
                 _outStreamFormat.mBytesPerPacket,
                 _outStreamFormat.mFramesPerPacket);
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "mBytesPerFrame = %u, mBitsPerChannel = %u",
                 _outStreamFormat.mBytesPerFrame,
                 _outStreamFormat.mBitsPerChannel);
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "mFormatFlags = %u, mChannelsPerFrame = %u",
                 _outStreamFormat.mFormatFlags,
                 _outStreamFormat.mChannelsPerFrame);
    logCAMsg(kTraceWarning, kTraceAudioDevice, _id, "mFormatID",
             (const char *) &_outStreamFormat.mFormatID);
    // The desired-format fields and the converter are shared with the render
    // callback, so mutate them under the callback lock.
    _critSectCb.Enter();
    // Our preferred format to work with.
    _outDesiredFormat.mSampleRate = N_PLAY_SAMPLES_PER_SEC;
    if (_outStreamFormat.mChannelsPerFrame >= 2 && (_playChannels == 2))
    {
        _outDesiredFormat.mChannelsPerFrame = 2;
    } else
    {
        // Disable stereo playout when we only have one channel on the device.
        _outDesiredFormat.mChannelsPerFrame = 1;
        _playChannels = 1;
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "Stereo playout unavailable on this device");
    }
    if (_ptrAudioBuffer)
    {
        // Update audio buffer with the selected parameters.
        _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
        _ptrAudioBuffer->SetPlayoutChannels((WebRtc_UWord8) _playChannels);
    }
    // Offset (in samples) between ring-buffer capacity and the engine's
    // buffered output; used when reporting render delay.
    _renderDelayOffsetSamples = _renderBufSizeSamples - N_BUFFERS_OUT
        * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * _outDesiredFormat.mChannelsPerFrame;
    _outDesiredFormat.mBytesPerPacket = _outDesiredFormat.mChannelsPerFrame
        * sizeof(SInt16);
    _outDesiredFormat.mFramesPerPacket = 1; // In uncompressed audio,
    // a packet is one frame.
    _outDesiredFormat.mBytesPerFrame = _outDesiredFormat.mChannelsPerFrame
        * sizeof(SInt16);
    _outDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;
    _outDesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
        | kLinearPCMFormatFlagIsPacked;
#ifdef WEBRTC_BIG_ENDIAN
    _outDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
#endif
    _outDesiredFormat.mFormatID = kAudioFormatLinearPCM;
    // Converter: our desired 16-bit PCM -> device native format.
    WEBRTC_CA_LOG_ERR(AudioConverterNew(&_outDesiredFormat, &_outStreamFormat,
            &_renderConverter));
    _critSectCb.Leave();
    // First try to set buffer size to desired value (_playBufDelayFixed).
    UInt32 bufByteCount = (UInt32)((_outStreamFormat.mSampleRate / 1000.0)
        * _playBufDelayFixed * _outStreamFormat.mChannelsPerFrame
        * sizeof(Float32));
    if (_outStreamFormat.mFramesPerPacket != 0)
    {
        // Round up to a whole number of packets.
        if (bufByteCount % _outStreamFormat.mFramesPerPacket != 0)
        {
            bufByteCount = ((UInt32)(bufByteCount
                / _outStreamFormat.mFramesPerPacket) + 1)
                * _outStreamFormat.mFramesPerPacket;
        }
    }
    // Ensure the buffer size is within the acceptable range provided by the device.
    propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
    AudioValueRange range;
    size = sizeof(range);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
            &propertyAddress, 0, NULL, &size, &range));
    if (range.mMinimum > bufByteCount)
    {
        bufByteCount = range.mMinimum;
    } else if (range.mMaximum < bufByteCount)
    {
        bufByteCount = range.mMaximum;
    }
    propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
    size = sizeof(bufByteCount);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_outputDeviceID,
            &propertyAddress, 0, NULL, size, &bufByteCount));
    // Get render device latency (frames -> microseconds).
    propertyAddress.mSelector = kAudioDevicePropertyLatency;
    UInt32 latency = 0;
    size = sizeof(UInt32);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
            &propertyAddress, 0, NULL, &size, &latency));
    _renderLatencyUs = (WebRtc_UWord32) ((1.0e6 * latency)
        / _outStreamFormat.mSampleRate);
#ifdef TRACKDEVICEDELAY
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "play report delay kAudioDevicePropertyLatency = %d",_renderLatencyUs);
#endif
    // Get render stream latency (first stream only) and add it in.
    propertyAddress.mSelector = kAudioDevicePropertyStreams;
    AudioStreamID stream = 0;
    size = sizeof(AudioStreamID);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
            &propertyAddress, 0, NULL, &size, &stream));
    propertyAddress.mSelector = kAudioStreamPropertyLatency;
    size = sizeof(UInt32);
    latency = 0;
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
            &propertyAddress, 0, NULL, &size, &latency));
    _renderLatencyUs += (WebRtc_UWord32) ((1.0e6 * latency)
        / _outStreamFormat.mSampleRate);
#ifdef TRACKDEVICEDELAY
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "play report delay kAudioDevicePropertyLatency = %d",(WebRtc_UWord32) ((1.0e6 * latency)
                 / _outStreamFormat.mSampleRate));
#endif
    // Latencies above 50 ms are treated as bogus device reports and ignored.
    if (_renderLatencyUs/1000 > 50)
    {
        _renderLatencyUs = 0;
    }
    // Listen for format changes.
    propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
    WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_outputDeviceID,
            &propertyAddress, &objectListenerProc, this));
    // Listen for processor overloads.
    propertyAddress.mSelector = kAudioDeviceProcessorOverload;
    WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_outputDeviceID,
            &propertyAddress, &objectListenerProc, this));
    // Listen for volume changes — deliberately disabled below.
    // Some devices (but not many) support a master channel.
    propertyAddress.mSelector = kAudioDevicePropertyVolumeScalar;
    if(AudioObjectHasProperty(_outputDeviceID, &propertyAddress))
    {
        // WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_outputDeviceID, &propertyAddress, &objectListenerProc, this));
    }
    else
    {
        // Typically the L and R channels are 1 and 2 respectively, but could be different
        /*
        propertyAddress.mElement = 1;
        WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_outputDeviceID, &propertyAddress, &objectListenerProc, this));
        propertyAddress.mElement = 2;
        WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_outputDeviceID, &propertyAddress, &objectListenerProc, this));
        */
    }
    // AudioDeviceCreateIOProcID is weak-linked; fall back to the deprecated
    // AudioDeviceAddIOProc on OS versions where it is unavailable.
    if (AudioDeviceCreateIOProcID != NULL)
    {
        WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(_outputDeviceID,
                deviceIOProc, this, &_deviceIOProcID));
    }
    else
    {
        WEBRTC_CA_RETURN_ON_ERR(AudioDeviceAddIOProc(_outputDeviceID, deviceIOProc, this));
    }
    // Mark playout side as initialized.
    _playIsInitialized = true;
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 " initial playout status: _renderDelayOffsetSamples=%d,"
                 " _renderDelayUs=%d, _renderLatencyUs=%d",
                 _renderDelayOffsetSamples, _renderDelayUs, _renderLatencyUs);
    return 0;
}
// Prepares the recording side of the selected input device:
//  - validates device state and applies per-device quirks keyed on the
//    device's friendly name (Logitech ConferenceCam, Revolabs,
//    multi-channel interfaces),
//  - reads the hardware input format and builds an AudioConverter from
//    it to our 16-bit PCM "desired" format,
//  - clamps and applies the device I/O buffer size,
//  - accumulates device + stream latency into _captureLatencyUs,
//  - registers property listeners and the capture IOProc.
// Returns 0 on success (or if already initialized), -1 on any error.
// Must not be called while recording is running.
WebRtc_Word32 AudioDeviceMac::InitRecording()
{
    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
                 "%s", __FUNCTION__);
    CriticalSectionScoped lock(_critSect);
    if (_recording)
    {
        // Cannot (re)initialize while actively recording.
        return -1;
    }
    if (!_inputDeviceIsSpecified)
    {
        return -1;
    }
    if (_recIsInitialized)
    {
        // Idempotent: a second call is a no-op success.
        return 0;
    }
    // Initialize the microphone (devices might have been added or removed).
    // Failure here is logged but non-fatal.
    if (InitMicrophone() == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     " InitMicrophone() failed");
    }
    if (!SpeakerIsInitialized())
    {
        // Make this call to check if we are using
        // one or two devices (_twoDevices)
        bool available = false;
        if (SpeakerIsAvailable(available) == -1)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         " SpeakerIsAvailable() failed");
        }
    }
    // Per-device quirks are keyed on the device's friendly name.
    char MicrophoneName[128];
    memset(MicrophoneName,0, sizeof(MicrophoneName));
    bool bMicrophoneHasName = GetDeviceFriendName(kAudioDevicePropertyScopeInput, _inputDeviceID, MicrophoneName);
    if (bMicrophoneHasName)
    {
        // Logitech ConferenceCam Connect: force a 16 kHz capture format.
        const char* LogitechConferenceCamConnect = "ConferenceCam Connect";
        if (strncmp(MicrophoneName, LogitechConferenceCamConnect, strlen(LogitechConferenceCamConnect)) == 0)
        {
            SetSpecialMicrophoneFormat(16000,0,true,false);
        }
#ifdef BUILD_FOR_MIMO
        // Revolabs/Bloomberg devices get a special format as well
        // (substring match anywhere in the name).
        const static char* RevolabsAudioDevice[] = {
            "Bloomberg",
            "Revolabs"
        };
        for(int i = 0;i < sizeof(RevolabsAudioDevice)/sizeof(char*); i++)
        {
            if (strstr(MicrophoneName, RevolabsAudioDevice[i]) != 0)
            {
                SetSpecialMicrophoneFormat(0,24,false,true);
            }
        }
#endif
#ifdef MUTI_MICROPHONE_SUPPORT
        // Known multi-channel audio interfaces keep all hardware channels
        // instead of being downmixed.
        _bMutilChannelsMic = false;
        if (strstr(MicrophoneName, "ZOOM UAC-8") != 0 || strstr(MicrophoneName,"ZOOM UAC-2") != 0 || strstr(MicrophoneName,"Dante Virtual Soundcard") != 0 || strstr(MicrophoneName,"Scarlett 18i20 USB") != 0
            || (strstr(MicrophoneName,"UMC1820") != 0) || (strstr(MicrophoneName,"UMC404HD") != 0))
        {
            _bMutilChannelsMic = true;
        }
#endif
    }
    OSStatus err = noErr;
    UInt32 size = 0;
    // Discard any stale samples left in the capture ring buffer and reset
    // delay bookkeeping.
    PaUtil_FlushRingBuffer(_paCaptureBuffer);
    _captureDelayUs = 0;
    _captureLatencyUs = 0;
    _captureDelayUsUpdate = 0;
    _captureDelayUsPrevious = 0;
    _captureDeviceIsAlive = 1;
    _doStopRec = false;
    // Get current stream description of the input device.
    AudioObjectPropertyAddress
        propertyAddress = { kAudioDevicePropertyStreamFormat,
                            kAudioDevicePropertyScopeInput, 0 };
    memset(&_inStreamFormat, 0, sizeof(_inStreamFormat));
    size = sizeof(_inStreamFormat);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
            &propertyAddress, 0, NULL, &size, &_inStreamFormat));
    // Only linear PCM with a supported channel count is accepted.
    if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM)
    {
        logCAMsg(kTraceError, kTraceAudioDevice, _id,
                 "Unacceptable input stream format -> mFormatID",
                 (const char *) &_inStreamFormat.mFormatID);
        return -1;
    }
    if (_inStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     ", Too many channels on device (mChannelsPerFrame = %d)",
                     _inStreamFormat.mChannelsPerFrame);
        return -1;
    }
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " Input stream format:");
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " mSampleRate = %f, mChannelsPerFrame = %u",
                 _inStreamFormat.mSampleRate, _inStreamFormat.mChannelsPerFrame);
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " mBytesPerPacket = %u, mFramesPerPacket = %u",
                 _inStreamFormat.mBytesPerPacket,
                 _inStreamFormat.mFramesPerPacket);
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " mBytesPerFrame = %u, mBitsPerChannel = %u",
                 _inStreamFormat.mBytesPerFrame,
                 _inStreamFormat.mBitsPerChannel);
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " mFormatFlags = %u, mChannelsPerFrame = %u",
                 _inStreamFormat.mFormatFlags,
                 _inStreamFormat.mChannelsPerFrame);
    logCAMsg(kTraceWarning, kTraceAudioDevice, _id, "mFormatID",
             (const char *) &_inStreamFormat.mFormatID);
    // The desired-format fields and the converter are shared with the
    // capture callback, so mutate them under the callback lock.
    _critSectCb.Enter();
    // Our preferred format to work with.
#ifdef MUTI_MICROPHONE_SUPPORT
    if(_bMutilChannelsMic)
    {
        // Keep all device channels for recognized multi-channel interfaces.
        _inDesiredFormat.mChannelsPerFrame = _inStreamFormat.mChannelsPerFrame;
        _recChannels = _inDesiredFormat.mChannelsPerFrame;
    }
    else
#endif
    {
        if (_inStreamFormat.mChannelsPerFrame > 2)
        {
            // Capture all hardware channels but deliver mono to the engine.
            _inDesiredFormat.mChannelsPerFrame = _inStreamFormat.mChannelsPerFrame;
            _recChannels = 1;
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "mutil-channel recording on this device");
        }
        else if (_inStreamFormat.mChannelsPerFrame == 2 && (_recChannels == 2))
        {
            _inDesiredFormat.mChannelsPerFrame = 2;
        }
        else
        {
            // Disable stereo recording when we only have one channel on the device.
            _inDesiredFormat.mChannelsPerFrame = 1;
            _recChannels = 1;
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "Stereo recording unavailable on this device");
        }
    }
    if (_ptrAudioBuffer)
    {
        // Update audio buffer with the selected parameters.
#ifdef MUTI_MICROPHONE_SUPPORT
        _ptrAudioBuffer->SetMultiChannelsRecording(_bMutilChannelsMic);
#endif
        _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
        _ptrAudioBuffer->SetRecordingChannels((WebRtc_UWord8) _recChannels);
    }
    _inDesiredFormat.mSampleRate = N_REC_SAMPLES_PER_SEC;
    _inDesiredFormat.mBytesPerPacket = _inDesiredFormat.mChannelsPerFrame
        * sizeof(SInt16);
    _inDesiredFormat.mFramesPerPacket = 1;
    _inDesiredFormat.mBytesPerFrame = _inDesiredFormat.mChannelsPerFrame
        * sizeof(SInt16);
    _inDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;
    _inDesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
        | kLinearPCMFormatFlagIsPacked;
#ifdef WEBRTC_BIG_ENDIAN
    _inDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
#endif
    _inDesiredFormat.mFormatID = kAudioFormatLinearPCM;
    // Converter: device native format -> our desired 16-bit PCM.
    WEBRTC_CA_LOG_ERR(AudioConverterNew(&_inStreamFormat, &_inDesiredFormat,
            &_captureConverter));
    _critSectCb.Leave();
    // First try to set buffer size to desired value (10 ms * N_BLOCKS_IO).
    // TODO(xians): investigate this block.
    UInt32 bufByteCount = (UInt32)((_inStreamFormat.mSampleRate / 1000.0)
        * 10.0 * N_BLOCKS_IO * _inStreamFormat.mChannelsPerFrame
        * sizeof(Float32));
    if(bMicrophoneHasName)
    {
        // FireWire 410 quirk: double the buffer size.
        const char* FireWire410 = "FireWire 410 Multichannel";
        if (strncmp(MicrophoneName, FireWire410, strlen(FireWire410)) == 0) {
            bufByteCount *= 2;
        }
    }
    if (_inStreamFormat.mFramesPerPacket != 0)
    {
        // Round up to a whole number of packets.
        if (bufByteCount % _inStreamFormat.mFramesPerPacket != 0)
        {
            bufByteCount = ((UInt32)(bufByteCount
                / _inStreamFormat.mFramesPerPacket) + 1)
                * _inStreamFormat.mFramesPerPacket;
        }
    }
    // Ensure the buffer size is within the acceptable range provided by the device.
    propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
    AudioValueRange range;
    size = sizeof(range);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
            &propertyAddress, 0, NULL, &size, &range));
    if (range.mMinimum > bufByteCount)
    {
        bufByteCount = range.mMinimum;
    } else if (range.mMaximum < bufByteCount)
    {
        bufByteCount = range.mMaximum;
    }
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " bufByteCount = %u,range.mMinimum = %u,range.mMaximum = %u",
                 bufByteCount, range.mMinimum, range.mMaximum);
    propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
    size = sizeof(bufByteCount);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_inputDeviceID,
            &propertyAddress, 0, NULL, size, &bufByteCount));
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " AudioObjectSetPropertyData selector:%d,size:%d",
                 propertyAddress.mSelector,size);
    // Get capture device latency (frames -> microseconds).
    propertyAddress.mSelector = kAudioDevicePropertyLatency;
    UInt32 latency = 0;
    size = sizeof(UInt32);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
            &propertyAddress, 0, NULL, &size, &latency));
    _captureLatencyUs = (UInt32)((1.0e6 * latency)
        / _inStreamFormat.mSampleRate);
#ifdef TRACKDEVICEDELAY
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "capture report delay kAudioDevicePropertyLatency = %d",_captureLatencyUs);
#endif
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " AudioObjectSetPropertyData selector:%d,size:%d",
                 propertyAddress.mSelector,size);
    // Get capture stream latency (first stream only) and add it in.
    propertyAddress.mSelector = kAudioDevicePropertyStreams;
    AudioStreamID stream = 0;
    size = sizeof(AudioStreamID);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
            &propertyAddress, 0, NULL, &size, &stream));
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " AudioObjectSetPropertyData selector:%d,size:%d",
                 propertyAddress.mSelector,size);
    propertyAddress.mSelector = kAudioStreamPropertyLatency;
    size = sizeof(UInt32);
    latency = 0;
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
            &propertyAddress, 0, NULL, &size, &latency));
    _captureLatencyUs += (UInt32)((1.0e6 * latency)
        / _inStreamFormat.mSampleRate);
#ifdef TRACKDEVICEDELAY
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "capture report delay kAudioStreamPropertyLatency = %d",(UInt32)((1.0e6 * latency)/_inStreamFormat.mSampleRate));
#endif
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " AudioObjectSetPropertyData selector:%d,size:%d",
                 propertyAddress.mSelector,size);
    //Siping: fix echo for C920 microphone
    // Latencies above 50 ms are treated as bogus device reports and ignored.
    if (_captureLatencyUs/1000 > 50)
    {
        _captureLatencyUs = 0;
    }
    // Listen for format changes.
    // TODO(xians): should we be using kAudioDevicePropertyDeviceHasChanged?
    if (_critSectFormatChange == NULL)
    {
        _critSectFormatChange = CriticalSectionWrapper::CreateCriticalSection();
    }
    propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
    WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_inputDeviceID,
            &propertyAddress, &objectListenerProc, this));
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " AudioObjectSetPropertyData selector:%d",
                 propertyAddress.mSelector);
    // Listen for processor overloads.
    propertyAddress.mSelector = kAudioDeviceProcessorOverload;
    WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_inputDeviceID,
            &propertyAddress, &objectListenerProc, this));
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " AudioObjectSetPropertyData selector:%d",
                 propertyAddress.mSelector);
    // Listen for volume changes — deliberately disabled (kept for reference).
    // Some devices (but not many) support a master channel.
    /*
    propertyAddress.mSelector = kAudioDevicePropertyVolumeScalar;
    if(AudioObjectHasProperty(_inputDeviceID, &propertyAddress))
    {
        WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_inputDeviceID, &propertyAddress, &objectListenerProc, this));
    }
    else
    {
        // Typically the L and R channels are 1 and 2 respectively, but could be different
        propertyAddress.mElement = 1;
        WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_inputDeviceID, &propertyAddress, &objectListenerProc, this));
        propertyAddress.mElement = 2;
        WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_inputDeviceID, &propertyAddress, &objectListenerProc, this));
    }
    */
    _recSameDevice = true;
    // AudioDeviceCreateIOProcID is weak-linked; fall back to the deprecated
    // AudioDeviceAddIOProc on OS versions where it is unavailable.
    if (AudioDeviceCreateIOProcID != NULL)
    {
        WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(_inputDeviceID,
                inDeviceIOProc, this, &_inDeviceIOProcID));
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "AudioDeviceCreateIOProcID _twoDevices = %d,_inputDeviceID = %d,_inDeviceIOProcID = %d", _twoDevices,_inputDeviceID,_inDeviceIOProcID);
    }
    else
    {
        WEBRTC_CA_RETURN_ON_ERR(AudioDeviceAddIOProc(_inputDeviceID, inDeviceIOProc,
                this));
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "AudioDeviceAddIOProc _twoDevices = %d,_inputDeviceID = %d,_inDeviceIOProcID = %d", _twoDevices,_inputDeviceID,_inDeviceIOProcID);
    }
    // Mark recording side as initialized.
    _recIsInitialized = true;
    return 0;
}
// Starts audio capture on the previously initialized input device:
// launches the capture worker thread, starts the device IOProc, and
// resets callback/error bookkeeping used by the watchdog logic in
// StopRecording(). Returns 0 on success (or if already recording),
// -1 if recording is not initialized, the module is not initialized,
// or the device fails to start.
WebRtc_Word32 AudioDeviceMac::StartRecording()
{
    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
                 "%s", __FUNCTION__);
    CriticalSectionScoped lock(_critSect);
    if (!_recIsInitialized)
    {
        return -1;
    }
    if (_recording)
    {
        // Already running — treat as success.
        return 0;
    }
    if (!_initialized)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " Recording worker thread has not been started");
        return -1;
    }
    OSStatus err = noErr;
    // Start the capture worker thread before the device so samples pushed
    // by the IOProc have a consumer.
    unsigned int threadID(0);
    if (_captureWorkerThread != NULL)
    {
        _captureWorkerThread->Start(threadID);
    }
    _captureWorkerThreadId = threadID;
    _need_detect = true;
    _recWaitErrorCount = 0;
#ifdef DEVICE_THREAD_EXCEPTION
    // Reset the global watchdog state tracking the running capture device.
    RunMicrophoneInfo.Stopped = false;
    RunMicrophoneInfo.errorCount = 0;
#endif
    if (AudioDeviceCreateIOProcID != NULL)
    {
        // Log-only macro here so we can undo the watchdog state on failure
        // before returning (WEBRTC_CA_RETURN_ON_ERR would return directly).
        WEBRTC_CA_LOG_ERR(AudioDeviceStart(_inputDeviceID, _inDeviceIOProcID));
        if (err != noErr)
        {
#ifdef DEVICE_THREAD_EXCEPTION
            RunMicrophoneInfo.Stopped = true;
#endif
            return -1;
        }
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "AudioDeviceStart _twoDevices = %d,_inputDeviceID = %d,_recSameDevice = %d", _twoDevices,_inputDeviceID,_recSameDevice);
    }
    else
    {
        // Legacy path for OS versions without AudioDeviceCreateIOProcID.
        WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, inDeviceIOProc));
    }
#ifdef DEVICE_THREAD_EXCEPTION
    RunMicrophoneInfo.DeviceID = _inputDeviceID;
    if (AudioDeviceCreateIOProcID != NULL)
    {
        RunMicrophoneInfo.DeviceIOProcID = _inDeviceIOProcID;
    }
#endif
    // Cleared here; set by the capture callback. StopRecording() uses it to
    // detect a device that never delivered any data.
    _recordCallbackHappened = false;
    _recording = true;
#ifdef CHECKTIMESTAMPERROR
    _timestampErrorCount = 0;
    _bCheckTimestampError = true;
#endif
    // Timestamp used by StopRecording() to judge how long the device ran
    // without producing callbacks.
    _MicrophoneStartTime = mach_absolute_time();
    return 0;
}
// Stops audio capture and tears down the recording side:
//  - stops (or force-shuts-down) the capture worker thread,
//  - asks the IOProc to stop the device via _doStopRec, falling back to a
//    GCD-dispatched forced stop if the IOProc never signals (e.g. the
//    device was unplugged and removal went undetected),
//  - disposes the capture converter and removes property listeners.
// Returns 0 always (a no-op if recording was never initialized).
// Note the deliberate _critSect Leave/Enter sequencing: waiting on the
// IOProc or the worker thread while holding the lock would deadlock.
WebRtc_Word32 AudioDeviceMac::StopRecording()
{
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "%s", __FUNCTION__);
    CriticalSectionScoped lock(_critSect);
    if (!_recIsInitialized)
    {
        return 0;
    }
    // Release the lock while stopping the worker thread (it may need the
    // lock to finish its current iteration).
    _critSect.Leave();
    if (_captureWorkerThread != NULL)
    {
        if (!_captureWorkerThread->Stop())
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " Timed out waiting for the capture worker thread to "
                         "stop.");
            // Graceful stop failed — force a shutdown.
            if(!_captureWorkerThread->Shutdown())
            {
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                             " Timed out waiting for the capture worker thread to "
                             "Shutdown.");
            }
            else
            {
                // Thread is gone for sure; safe to free the format-change lock.
                _critSectCb.Enter();
                if (_critSectFormatChange)
                {
                    delete _critSectFormatChange;
                    _critSectFormatChange = NULL;
                }
                _critSectCb.Leave();
            }
        }
    }
    _critSect.Enter();
    _recWaitErrorCount = 0;
    OSStatus err = noErr;
    // Stop device
    int32_t captureDeviceIsAlive = AtomicGet32(&_captureDeviceIsAlive);
    {
        if (_recording /*&& captureDeviceIsAlive == 1*/)
        {
            _recording = false;
            _doStopRec = true; // Signal to io proc to stop audio device
            _critSect.Leave(); // Cannot be under lock, risk of deadlock
            // How long has the device been running since StartRecording()?
            // Used to decide whether a missing first callback means the
            // device is dead or just hasn't started yet.
            uint64_t elapsedStartAbsTime = mach_absolute_time() - _MicrophoneStartTime;
            Nanoseconds elapsedStartNano = AbsoluteToNanoseconds( *(AbsoluteTime *) &elapsedStartAbsTime );
            uint64_t elapsedStartMs = (* (uint64_t *) &elapsedStartNano) / 1000000;
            // Take the forced-stop path when: the device is known dead, or it
            // ran long enough without ever calling back, or the IOProc failed
            // to acknowledge the stop request within the timeout.
            if (!captureDeviceIsAlive || (!_recordCallbackHappened && (elapsedStartMs > MaxNoCallbacktime))|| kEventSignaled != _stopEventRec.Wait(WAIT_THREAD_TERMINAL))
            {
                if (m_StopDeviceQueue)
                {
                    // Stop the device from a separate serial queue so a hung
                    // AudioDeviceStop cannot block this thread forever.
                    dispatch_async(m_StopDeviceQueue, ^{
                        CriticalSectionScoped critScoped(_critSect);
                        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                                     " Timed out stopping the capture IOProc. "
                                     "We may have failed to detect a device removal.");
                        if (_doStopRec)
                        {
                            if (AudioDeviceCreateIOProcID != NULL)
                            {
                                AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID);
                                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                                             " StopRecording AudioDeviceStop _inputDeviceID = %d _outputDeviceID = %d.",_inputDeviceID,_outputDeviceID);
                            }
                            else
                            {
                                AudioDeviceStop(_inputDeviceID,inDeviceIOProc);
                            }
                            if (AudioDeviceDestroyIOProcID != NULL)
                            {
                                AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID);
                            }
                            else
                            {
                                AudioDeviceRemoveIOProc(_inputDeviceID, inDeviceIOProc);
                            }
                            _doStopRec = false;
                        }
                        _stopEventRecAgain.Set();
                    });
                }
                if (kEventSignaled != _stopEventRecAgain.Wait(WAIT_THREAD_TERMINAL))
                {
                    // Even the forced stop hung: deliberately crash (null-ish
                    // write) so the failure is caught by crash reporting
                    // instead of leaving the app wedged.
                    //force crash
                    int crashAddress = 120;
                    int* ptr = (int*)crashAddress;
                    *ptr = 0;
                }
            }
            _critSect.Enter();
            _doStopRec = false;
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " Recording stopped");
        }
    }
#ifdef DEVICE_THREAD_EXCEPTION
    RunMicrophoneInfo.Stopped = true;
    RunMicrophoneInfo.errorCount = 0;
#endif
    // Setting this signal will allow the worker thread to be stopped.
    AtomicSet32(&_captureDeviceIsAlive, 0);
    _critSectCb.Enter();
    WEBRTC_CA_LOG_WARN(AudioConverterDispose(_captureConverter));
    _critSectCb.Leave();
    // Remove listeners.
    AudioObjectPropertyAddress
        propertyAddress = { kAudioDevicePropertyStreamFormat,
                            kAudioDevicePropertyScopeInput, 0 };
    WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_inputDeviceID,
            &propertyAddress, &objectListenerProc, this));
    propertyAddress.mSelector = kAudioDeviceProcessorOverload;
    WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_inputDeviceID,
            &propertyAddress, &objectListenerProc, this));
    // Volume listeners were never registered (disabled in InitRecording);
    // the matching removal code is kept for reference.
    // Some devices (but not many) support a master channel.
    /*
    propertyAddress.mSelector = kAudioDevicePropertyVolumeScalar;
    if(AudioObjectHasProperty(_inputDeviceID, &propertyAddress))
    {
        WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_inputDeviceID, &propertyAddress, &objectListenerProc, this));
    }
    else
    {
        // Typically the L and R channels are 1 and 2 respectively, but could be different
        propertyAddress.mElement = 1;
        WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_inputDeviceID, &propertyAddress, &objectListenerProc, this));
        propertyAddress.mElement = 2;
        WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_inputDeviceID, &propertyAddress, &objectListenerProc, this));
    }
    */
    _recIsInitialized = false;
    _recording = false;
    if (_bUseExclusiveMode)
    {
        // Leaving exclusive mode: restore the microphone sample rate.
        _bUseExclusiveMode = false;
        AdjustMicrophoneSampleRateBaseDeviceMode();
    }
    return 0;
}
  2721. class USBDeviceInfo
  2722. {
  2723. public:
  2724. USBDeviceInfo(UInt16 PID, UInt16 VID, UInt32 LocationID)
  2725. : m_pId(PID)
  2726. , m_vId(VID)
  2727. , m_locationId(LocationID){}
  2728. UInt16 m_pId;
  2729. UInt16 m_vId;
  2730. UInt32 m_locationId;
  2731. };
// Forces the capture device's physical stream format to a specific sample
// rate and/or bit depth as a workaround for problem devices.
//
// - When |bSetSampleRate| is set and the device's current rate differs from
//   |sampleRate|, the first linear-PCM format with that rate is applied.
// - When |bSetBitsPerChannel| is set, the bit-depth change is applied ONLY if
//   the microphone is attached behind one of the known-problematic USB
//   extenders listed in _mUSBExtenderWithAudioIssueVec AND a device from the
//   special vendor list is present (detected by walking the IOKit USB device
//   tree and comparing LocationID prefixes). On macOS < 10.12.4 this branch
//   is skipped entirely.
//
// Returns 0 on success or when no change is needed; -1 only if reading the
// current stream format fails (WEBRTC_CA_RETURN_ON_ERR). Failures to *set*
// the format are logged and otherwise ignored.
WebRtc_Word32 AudioDeviceMac::SetSpecialMicrophoneFormat(Float64 sampleRate, UInt32 bitsPerChannel, bool bSetSampleRate, bool bSetBitsPerChannel) const
{
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "SetSpecialMicrophoneFormat begin");
    UInt32 size = 0;
    OSStatus err = noErr;
    // Read the device's current physical format so we only change it when needed.
    AudioStreamBasicDescription microphoneDefaultStreamFormat;
    AudioObjectPropertyAddress propertyAddressForFormat = { kAudioDevicePropertyStreamFormat,
        kAudioDevicePropertyScopeInput, 0 };
    memset(&microphoneDefaultStreamFormat, 0, sizeof(microphoneDefaultStreamFormat));
    size = sizeof(microphoneDefaultStreamFormat);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
        &propertyAddressForFormat, 0, NULL, &size, &microphoneDefaultStreamFormat));
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "SetSpecialMicrophoneFormat bitsPerChannel = %d, default bitsPerchannel = %d,extension size = %d", bitsPerChannel,microphoneDefaultStreamFormat.mBitsPerChannel,_mUSBExtenderWithAudioIssueVec.size());
    if (bSetSampleRate && microphoneDefaultStreamFormat.mSampleRate != sampleRate)
    {
        // Enumerate every format the device supports and pick the first
        // linear-PCM entry matching the requested sample rate.
        AudioStreamBasicDescription* p;
        Boolean ow;
        int i;
        UInt32 propertySize=0; //sizeof(p);
        AudioObjectPropertyAddress propertyAddressForAllFormat = { kAudioDevicePropertyStreamFormats,
            kAudioDevicePropertyScopeInput, 0 };
        err = AudioObjectGetPropertyDataSize(_inputDeviceID,
            &propertyAddressForAllFormat, 0, NULL, &propertySize);
        if(err == noErr)
        {
            p = (AudioStreamBasicDescription*)malloc(propertySize);
            err = AudioObjectGetPropertyData(_inputDeviceID,
                &propertyAddressForAllFormat, 0, NULL, &propertySize, p);
            if (err == noErr)
            {
                int indexForFormat = -1;
                for(int i=0;i<propertySize/sizeof(AudioStreamBasicDescription);i++)
                {
                    AudioStreamBasicDescription* pp = &(p[i]);
                    if ((pp->mSampleRate == sampleRate) && (pp->mFormatID == kAudioFormatLinearPCM))
                    {
                        indexForFormat = i;
                        break;
                    }
                }
                if (indexForFormat != -1 )
                {
                    // Apply the matching format; failure is logged only.
                    err = AudioObjectSetPropertyData(_inputDeviceID,
                                                     &propertyAddressForFormat,
                                                     0,
                                                     NULL,
                                                     sizeof(AudioStreamBasicDescription),
                                                     &(p[indexForFormat]));
                    if (err == noErr)
                    {
                        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                                     "SetSpecialMicrophoneFormat AdjustMicrophoneSampleRate set microphone format to 16K");
                    }
                }
            }
            free(p);
        }
    }
    if (bSetBitsPerChannel && microphoneDefaultStreamFormat.mBitsPerChannel != bitsPerChannel)
    {
        bool bHasUSBExtender = false;
        if(isMacOS10124AndLater())
        {
            // Vendor ids of the audio devices affected by the extender issue.
            const UInt16 audioDeviceVendorID[] = {0x2abf};
            /*
            const USBDeviceInfo allUSBExtenderDeviceInfo[] ={
            USBDeviceInfo(0x2512,0x0424,0)
            ,USBDeviceInfo(0x6506,0x04b4,0)
            // ,USBDeviceInfo(2,2,1)
            // ,USBDeviceInfo(3,3,1)
            };
            */
            std::vector<USBDeviceInfo> usbExtenderVector;
            std::vector<USBDeviceInfo> specialDeviceVector;
            usbExtenderVector.clear();
            specialDeviceVector.clear();
            CFMutableDictionaryRef matchingDict;
            io_iterator_t iter;
            kern_return_t kr;
            io_service_t usbDeviceRef;
            /* set up a matching dictionary for the class */
            matchingDict = IOServiceMatching(kIOUSBDeviceClassName);
            if (matchingDict != NULL)
            {
                // Walk every USB device in the IOKit registry, collecting the
                // special audio devices and the known-bad extenders.
                kr = IOServiceGetMatchingServices(kIOMasterPortDefault, matchingDict, &iter);
                if (kr == KERN_SUCCESS)
                {
                    while ((usbDeviceRef = IOIteratorNext(iter)))
                    {
                        if (usbDeviceRef)
                        {
                            IOUSBDeviceInterface245** usbDeviceInterface = NULL;
                            SInt32 score;
                            IOCFPlugInInterface** plugin = NULL;
                            kern_return_t err;
                            err = IOCreatePlugInInterfaceForService(usbDeviceRef, kIOUSBDeviceUserClientTypeID, kIOCFPlugInInterfaceID, &plugin, &score);
                            if (err == 0 && plugin)
                            {
                                err = (*plugin)->QueryInterface(plugin,CFUUIDGetUUIDBytes(kIOUSBDeviceInterfaceID245),(LPVOID*)&usbDeviceInterface);
                                if (err == 0 && usbDeviceInterface)
                                {
                                    UInt16 PID = 0;
                                    UInt16 VID = 0;
                                    UInt32 LocationID = 0;
                                    err = (*usbDeviceInterface)->GetDeviceVendor(usbDeviceInterface,&VID);
                                    err = (*usbDeviceInterface)->GetDeviceProduct(usbDeviceInterface,&PID);
                                    err = (*usbDeviceInterface)->GetLocationID(usbDeviceInterface,&LocationID);
                                    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "SetSpecialMicrophoneFormat VID = 0x%x, PID = 0x%x,LocationID = 0x%x", VID,PID,LocationID);
                                    if (err == 0)
                                    {
                                        for (int i = 0; i < sizeof(audioDeviceVendorID)/sizeof(UInt16); i++)
                                        {
                                            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "SetSpecialMicrophoneFormat 1 , audioDeviceVendorID[i] = 0x%x,i = %d", audioDeviceVendorID[i],i);
                                            if (VID == audioDeviceVendorID[i])
                                            {
                                                specialDeviceVector.push_back(USBDeviceInfo(PID,VID,LocationID));
                                            }
                                        }
                                        for (int i = 0; i < _mUSBExtenderWithAudioIssueVec.size(); i++)
                                        {
                                            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "SetSpecialMicrophoneFormat 2 VID = 0x%x, PID = 0x%x,i = %d", _mUSBExtenderWithAudioIssueVec[i].extenderVID,_mUSBExtenderWithAudioIssueVec[i].extenderPID,i);
                                            if (VID == _mUSBExtenderWithAudioIssueVec[i].extenderVID && PID == _mUSBExtenderWithAudioIssueVec[i].extenderPID)
                                            {
                                                usbExtenderVector.push_back(USBDeviceInfo(PID,VID,LocationID));
                                            }
                                        }
                                    }
                                    (*usbDeviceInterface)->Release(usbDeviceInterface);
                                }
                                IODestroyPlugInInterface(plugin);
                            }
                            IOObjectRelease(usbDeviceRef);
                        }
                    }
                }
                IOObjectRelease(iter);
            }
            if (!(specialDeviceVector.empty())
                && !(usbExtenderVector.empty()))
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                             "SetSpecialMicrophoneFormat AdjustMicrophoneSampleRate usbExtender.size = %d, specialDeviceVector.size = %d", usbExtenderVector.size(), specialDeviceVector.size());
                // Decide whether a special device sits BEHIND an extender by
                // comparing LocationIDs: strip trailing '0's from both hex
                // strings; if the extender's prefix occurs in the device's id,
                // the device hangs off that extender's port chain.
                char audioDeviceLocationIDHEX[64];
                char USBExtenderLocationIDHEX[64];
                for (int i = 0; i < specialDeviceVector.size(); i++)
                {
                    //0x14240000, 0x14200000
                    //0x14242000, 0x14202000
                    memset(audioDeviceLocationIDHEX, 0, sizeof(audioDeviceLocationIDHEX));
                    sprintf(audioDeviceLocationIDHEX, "0x%x",(unsigned int)specialDeviceVector[i].m_locationId);
                    for (int j = 0; j < usbExtenderVector.size(); j++)
                    {
                        memset(USBExtenderLocationIDHEX, 0, sizeof(USBExtenderLocationIDHEX));
                        sprintf(USBExtenderLocationIDHEX, "0x%x",(unsigned int)usbExtenderVector[j].m_locationId);
                        // Trim trailing zero digits of the device's LocationID.
                        int k = strlen(audioDeviceLocationIDHEX) - 1;
                        for (; k >= 0; k--) {
                            if (audioDeviceLocationIDHEX[k] != '0')
                            {
                                break;
                            }
                            else
                            {
                                audioDeviceLocationIDHEX[k] = '\0';
                            }
                        }
                        // Trim trailing zero digits of the extender's LocationID.
                        k = strlen(USBExtenderLocationIDHEX) - 1;
                        for (; k >= 0; k--) {
                            if (USBExtenderLocationIDHEX[k] != '0')
                            {
                                break;
                            }
                            else
                            {
                                USBExtenderLocationIDHEX[k] = '\0';
                            }
                        }
                        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                                     "SetSpecialMicrophoneFormat AdjustMicrophoneSampleRate audioDevice's locationID = %s,extender's locationID = %s, k = %d",audioDeviceLocationIDHEX,USBExtenderLocationIDHEX,k);
                        if (NULL != strstr(audioDeviceLocationIDHEX,USBExtenderLocationIDHEX))
                        {
                            bHasUSBExtender = true;
                            break;
                        }
                    }
                    if (bHasUSBExtender)
                    {
                        break;
                    }
                }
            }
        }
        if (!bHasUSBExtender)
        {
            // No problematic extender in the chain: leave the bit depth alone.
            return 0;
        }
        // Same enumeration as above, but matching on bits-per-channel.
        AudioStreamBasicDescription* p;
        Boolean ow;
        int i;
        UInt32 propertySize=0; //sizeof(p);
        AudioObjectPropertyAddress propertyAddressForAllFormat = { kAudioDevicePropertyStreamFormats,
            kAudioDevicePropertyScopeInput, 0 };
        err = AudioObjectGetPropertyDataSize(_inputDeviceID,
            &propertyAddressForAllFormat, 0, NULL, &propertySize);
        if(err == noErr)
        {
            p = (AudioStreamBasicDescription*)malloc(propertySize);
            err = AudioObjectGetPropertyData(_inputDeviceID,
                &propertyAddressForAllFormat, 0, NULL, &propertySize, p);
            if (err == noErr)
            {
                int indexForFormat = -1;
                for(int i=0;i<propertySize/sizeof(AudioStreamBasicDescription);i++)
                {
                    AudioStreamBasicDescription* pp = &(p[i]);
                    if ((pp->mBitsPerChannel == bitsPerChannel) && (pp->mFormatID == kAudioFormatLinearPCM))
                    {
                        indexForFormat = i;
                        break;
                    }
                }
                if (indexForFormat != -1 )
                {
                    err = AudioObjectSetPropertyData(_inputDeviceID,
                                                     &propertyAddressForFormat,
                                                     0,
                                                     NULL,
                                                     sizeof(AudioStreamBasicDescription),
                                                     &(p[indexForFormat]));
                    if (err == noErr)
                    {
                        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                                     "SetSpecialMicrophoneFormat AdjustMicrophoneSampleRate set microphone format to 24bit");
                    }
                }
            }
            free(p);
        }
    }
    return 0;
}
  2972. bool AudioDeviceMac::RecordingIsInitialized() const
  2973. {
  2974. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  2975. "%s", __FUNCTION__);
  2976. return (_recIsInitialized);
  2977. }
  2978. bool AudioDeviceMac::Recording() const
  2979. {
  2980. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  2981. "%s", __FUNCTION__);
  2982. return (_recording);
  2983. }
  2984. bool AudioDeviceMac::PlayoutIsInitialized() const
  2985. {
  2986. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  2987. "%s", __FUNCTION__);
  2988. return (_playIsInitialized);
  2989. }
// Starts audio playout on the previously initialized output device.
// Returns 0 on success (or if already playing), -1 if playout was never
// initialized or the HAL refuses to start the IOProc.
WebRtc_Word32 AudioDeviceMac::StartPlayout()
{
    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
                 "%s", __FUNCTION__);
    if (!_playIsInitialized)
    {
        return -1;
    }
    if (_playing)
    {
        // Already running; nothing to do.
        return 0;
    }
    OSStatus err = noErr;
    unsigned int threadID(0);
    // Start the worker thread that feeds the render side.
    if (_renderWorkerThread != NULL)
    {
        _renderWorkerThread->Start(threadID);
    }
    _renderWorkerThreadId = threadID;
    _need_detect_play = true;
#ifdef DEVICE_THREAD_EXCEPTION
    RunSpeakerInfo.Stopped = false;
    RunSpeakerInfo.errorCount = 0;
#endif
    // AudioDeviceCreateIOProcID is weak-linked; when it is available use the
    // IOProcID registered at init time, otherwise fall back to the legacy
    // deviceIOProc entry point.
    if (AudioDeviceCreateIOProcID != NULL)
    {
        WEBRTC_CA_LOG_ERR(AudioDeviceStart(_outputDeviceID, _deviceIOProcID));
        if (err != noErr)
        {
#ifdef DEVICE_THREAD_EXCEPTION
            RunSpeakerInfo.Stopped = true;
#endif
            return -1;
        }
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "AudioDeviceStart %s,_outputDeviceID = %d,_deviceIOProcID = %d", __FUNCTION__,_outputDeviceID,_deviceIOProcID);
    }
    else
    {
        WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_outputDeviceID, deviceIOProc));
    }
    // _playCallbackHappened lets StopPlayout distinguish "device never
    // delivered a callback" from a normal stop.
    _playCallbackHappened = false;
    _playing = true;
    _playWaitErrorCount = 0;
#ifdef DEVICE_THREAD_EXCEPTION
    RunSpeakerInfo.DeviceID = _outputDeviceID;
    if (AudioDeviceCreateIOProcID != NULL)
    {
        RunSpeakerInfo.DeviceIOProcID = _deviceIOProcID;
    }
#endif
#ifdef CHECKTIMESTAMPERROR
    _timestampErrorCount = 0;
    _bCheckTimestampError = true;
#endif
#ifdef BUILD_FOR_MIMO
    // Record the start instant so StopPlayout can time out waiting for the
    // first IO callback. Also force the system default output to our device
    // when it differs.
    _SpeakerStartTime = mach_absolute_time();
    AudioDeviceID SystemDefaultSpeakerID = kAudioObjectUnknown;
    if(GetSystemDefaultPlayDevice(SystemDefaultSpeakerID))
    {
        if (/*(SystemDefaultSpeakerID != _zoomDeviceSpeakerID) &&*/ (SystemDefaultSpeakerID != _outputDeviceID))
        {
            SetSystemDefaultPlayDevice(_outputDeviceID);
            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "StartPlayout SetSystemDefaultPlayDevice");
        }
    }
#endif
    return 0;
}
// Stops playout: signals the IOProc to stop itself, waits for confirmation,
// and if that times out (or the device died, or no callback ever arrived)
// force-stops the device from a serial dispatch queue. Afterwards it stops
// the render worker thread, disposes the render converter and removes the
// property listeners installed at init time. Always returns 0.
//
// NOTE(review): if even the dispatch-queue stop does not signal within
// WAIT_THREAD_TERMINAL, the code deliberately crashes by writing through a
// fixed invalid address — presumably to surface the hang in crash reports.
WebRtc_Word32 AudioDeviceMac::StopPlayout()
{
    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
                 "%s", __FUNCTION__);
    CriticalSectionScoped lock(_critSect);
    if (!_playIsInitialized)
    {
        return 0;
    }
    OSStatus err = noErr;
    int32_t renderDeviceIsAlive = AtomicGet32(&_renderDeviceIsAlive);
    if (_playing /*&& renderDeviceIsAlive == 1*/)
    {
        // We signal a stop for a shared device even when capturing has not
        // yet ended. This is to ensure the IOProc will return early as
        // intended (by checking |_playing|) before accessing resources we
        // free below (e.g. the render converter).
        //
        // In the case of a shared device, the IOProc will verify capturing
        // has ended before stopping itself.
        _playing = false;
        _doStop = true; // Signal to io proc to stop audio device
        _critSect.Leave(); // Cannot be under lock, risk of deadlock
        // How long since StartPlayout? Used to decide whether the absence of
        // a callback means the device is dead rather than merely slow.
        uint64_t elapsedStartAbsTime = mach_absolute_time() - _SpeakerStartTime;
        Nanoseconds elapsedStartNs = AbsoluteToNanoseconds( *(AbsoluteTime *) &elapsedStartAbsTime );
        uint64_t elapsedStartMs = (* (uint64_t *) &elapsedStartNs) / 1000000;
        if (!renderDeviceIsAlive || (!_playCallbackHappened && (elapsedStartMs > MaxNoCallbacktime) )|| kEventSignaled != _stopEvent.Wait(WAIT_THREAD_TERMINAL))
        {
            // The IOProc did not confirm the stop; force-stop the device from
            // a background queue so a hung HAL call cannot block this thread.
            if (m_StopDeviceQueue)
            {
                dispatch_async(m_StopDeviceQueue, ^{
                    CriticalSectionScoped critScoped(_critSect);
                    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                                 " Timed out stopping the render IOProc. "
                                 "We may have failed to detect a device removal.");
                    // We assume capturing on a shared device has stopped as well if the
                    // IOProc times out.
                    if (_doStop)
                    {
                        if (AudioDeviceCreateIOProcID != NULL)
                        {
                            AudioDeviceStop(_outputDeviceID, _deviceIOProcID);
                            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                                         " StopPlayout AudioDeviceStop _inputDeviceID = %d _outputDeviceID = %d.",_inputDeviceID,_outputDeviceID);
                        }
                        else
                        {
                            AudioDeviceStop(_outputDeviceID,deviceIOProc);
                        }
                        if (AudioDeviceDestroyIOProcID != NULL)
                        {
                            AudioDeviceDestroyIOProcID(_outputDeviceID,_deviceIOProcID);
                        }
                        else
                        {
                            AudioDeviceRemoveIOProc(_outputDeviceID, deviceIOProc);
                        }
                        _doStop = false;
                    }
                    _stopEventAgain.Set();
                });
            }
            if (kEventSignaled != _stopEventAgain.Wait(WAIT_THREAD_TERMINAL))
            {
                //force crash
                int crashAddress = 120;
                int* ptr = (int*)crashAddress;
                *ptr = 0;
            }
        }
        _critSect.Enter();
        _doStop = false;
        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     "Playout stopped");
    }
    // Setting this signal will allow the worker thread to be stopped.
    AtomicSet32(&_renderDeviceIsAlive, 0);
    _critSect.Leave();
    if (_renderWorkerThread != NULL)
    {
        if (!_renderWorkerThread->Stop())
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " Timed out waiting for the render worker thread to "
                         "stop.");
        }
    }
#ifdef DEVICE_THREAD_EXCEPTION
    RunSpeakerInfo.Stopped = true;
    RunSpeakerInfo.errorCount = 0;
#endif
    _critSect.Enter();
    _critSectCb.Enter();
    WEBRTC_CA_LOG_WARN(AudioConverterDispose(_renderConverter));
    _critSectCb.Leave();
    // Remove listeners.
    AudioObjectPropertyAddress propertyAddress = {
        kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeOutput,
        0 };
    WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID,
        &propertyAddress, &objectListenerProc, this));
    propertyAddress.mSelector = kAudioDeviceProcessorOverload;
    WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID,
        &propertyAddress, &objectListenerProc, this));
    // Some devices (but not many) support a master channel
    /*
    propertyAddress.mSelector = kAudioDevicePropertyVolumeScalar;
    if(AudioObjectHasProperty(_outputDeviceID, &propertyAddress))
    {
    WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID, &propertyAddress, &objectListenerProc, this));
    }
    else
    {
    // Typically the L and R channels are 1 and 2 respectively, but could be different
    propertyAddress.mElement = 1;
    WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID, &propertyAddress, &objectListenerProc, this));
    propertyAddress.mElement = 2;
    WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID, &propertyAddress, &objectListenerProc, this));
    }
    */
    // MacBook Pros get an extra data-source listener installed at init time;
    // remove it here if the device still exposes the property.
    if (_macBookPro)
    {
        propertyAddress.mSelector = kAudioDevicePropertyDataSource;
        Boolean hasProperty = AudioObjectHasProperty(_outputDeviceID,
                                                     &propertyAddress);
        if (hasProperty)
        {
            WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID,
                &propertyAddress, &objectListenerProc, this));
        }
    }
    _playIsInitialized = false;
    _playing = false;
    _playWaitErrorCount = 0;
    return 0;
}
  3195. WebRtc_Word32 AudioDeviceMac::PlayoutDelay(WebRtc_UWord16& delayMS) const
  3196. {
  3197. int32_t renderDelayUs = AtomicGet32(&_msecOnPlaySide);
  3198. delayMS = static_cast<WebRtc_UWord16> (1e-3 * renderDelayUs + 0.5);
  3199. return 0;
  3200. }
  3201. WebRtc_Word32 AudioDeviceMac::RecordingDelay(WebRtc_UWord16& delayMS) const
  3202. {
  3203. int32_t captureDelayUs = AtomicGet32(&_msecOnRecordSide);
  3204. delayMS = static_cast<WebRtc_UWord16> (1e-3 * captureDelayUs + 0.5);
  3205. return 0;
  3206. }
  3207. WebRtc_Word32 AudioDeviceMac::RecordingTS(WebRtc_UWord64* timestampNS) const
  3208. {
  3209. *timestampNS = _recDataInputTimeNs;
  3210. return 0;
  3211. }
  3212. bool AudioDeviceMac::Playing() const
  3213. {
  3214. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  3215. "%s", __FUNCTION__);
  3216. return (_playing);
  3217. }
  3218. WebRtc_Word32 AudioDeviceMac::SetPlayoutBuffer(
  3219. const AudioDeviceModule::BufferType type,
  3220. WebRtc_UWord16 sizeMS)
  3221. {
  3222. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  3223. "AudioDeviceMac::SetPlayoutBuffer(type=%u, sizeMS=%u)", type,
  3224. sizeMS);
  3225. if (type != AudioDeviceModule::kFixedBufferSize)
  3226. {
  3227. WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
  3228. " Adaptive buffer size not supported on this platform");
  3229. return -1;
  3230. }
  3231. _playBufType = type;
  3232. _playBufDelayFixed = sizeMS;
  3233. return 0;
  3234. }
  3235. WebRtc_Word32 AudioDeviceMac::PlayoutBuffer(
  3236. AudioDeviceModule::BufferType& type,
  3237. WebRtc_UWord16& sizeMS) const
  3238. {
  3239. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  3240. "%s", __FUNCTION__);
  3241. type = _playBufType;
  3242. sizeMS = _playBufDelayFixed;
  3243. return 0;
  3244. }
  3245. // Not implemented for Mac.
  3246. WebRtc_Word32 AudioDeviceMac::CPULoad(WebRtc_UWord16& /*load*/) const
  3247. {
  3248. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  3249. "%s", __FUNCTION__);
  3250. WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
  3251. " API call not supported on this platform");
  3252. return -1;
  3253. }
  3254. bool AudioDeviceMac::PlayoutWarning() const
  3255. {
  3256. return (_playWarning > 0);
  3257. }
  3258. WebRtc_UWord16 AudioDeviceMac::PlayoutError() const
  3259. {
  3260. return (_playError);
  3261. }
  3262. bool AudioDeviceMac::RecordingWarning() const
  3263. {
  3264. return (_recWarning > 0);
  3265. }
  3266. WebRtc_UWord16 AudioDeviceMac::RecordingError() const
  3267. {
  3268. return (_recError);
  3269. }
  3270. WebRtc_UWord16 AudioDeviceMac::LoopbackRecordingError() const
  3271. {
  3272. return _loopbackrecError;
  3273. }
  3274. void AudioDeviceMac::ClearPlayoutWarning()
  3275. {
  3276. _playWarning = 0;
  3277. }
  3278. void AudioDeviceMac::ClearPlayoutError()
  3279. {
  3280. _playError = 0;
  3281. }
  3282. void AudioDeviceMac::ClearRecordingWarning()
  3283. {
  3284. _recWarning = 0;
  3285. }
  3286. void AudioDeviceMac::ClearRecordingError()
  3287. {
  3288. _recError = 0;
  3289. }
  3290. void AudioDeviceMac::ClearLoopbackRecordingError()
  3291. {
  3292. _loopbackrecError = 0;
  3293. }
  3294. // ============================================================================
  3295. // Private Methods
  3296. // ============================================================================
  3297. #ifdef BUILD_FOR_MIMO
  3298. WebRtc_Word32 AudioDeviceMac::IsLowPriorityDevice(const AudioObjectPropertyScope scope,
  3299. AudioDeviceID DeviceId)
  3300. {
  3301. UInt32 transportType = 0;
  3302. AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyTransportType,scope, 0 };
  3303. UInt32 size = sizeof(UInt32);
  3304. OSStatus err = AudioObjectGetPropertyData(DeviceId,&propertyAddress, 0, NULL, &size, &transportType);
  3305. if (err == noErr)
  3306. {
  3307. if (transportType == 'usb ')
  3308. {
  3309. return 0;
  3310. }
  3311. else if (transportType == 'hdmi')
  3312. {
  3313. return 1;
  3314. }
  3315. else if(transportType == 'bltn')
  3316. {
  3317. return 2;
  3318. }
  3319. else
  3320. {
  3321. return 3;
  3322. }
  3323. }
  3324. return 3;
  3325. }
// Enumerates all HAL devices that have at least one stream in |scope|,
// writing their ids to |scopedDeviceIds| sorted by transport priority
// (see IsLowPriorityDevice; lower number = listed first, and a device that
// also has streams in the opposite scope gets a one-step priority boost).
// When |bCheckZoomAudioDeviceNum| is set, |ZoomAudioDeviceNum| receives the
// count of devices whose friendly name matches the Zoom/Blackmagic patterns.
// Returns the number of devices written, 0 if none, or -1 on a HAL error.
WebRtc_Word32
AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope,
                                 AudioDeviceID scopedDeviceIds[],
                                 const WebRtc_UWord32 deviceListLength,
                                 WebRtc_UWord8 &ZoomAudioDeviceNum,
                                 bool bCheckZoomAudioDeviceNum)
{
    ZoomAudioDeviceNum = 0;
    OSStatus err = noErr;
    AudioObjectPropertyAddress propertyAddress = {
        kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
        kAudioObjectPropertyElementMaster };
    UInt32 size = 0;
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize(kAudioObjectSystemObject,
        &propertyAddress, 0, NULL, &size));
    if (size == 0)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "No devices");
        return 0;
    }
    AudioDeviceID* deviceIds = (AudioDeviceID*) malloc(size);
    UInt32 numberDevices = size / sizeof(AudioDeviceID);
    AudioBufferList* bufferList = NULL;
    UInt32 numberScopedDevices = 0;
    AudioDeviceID localScopedDeviceIds[MaxNumberDevices];
    // Priority bucket per collected device; filled below, consumed by the
    // stable bucket-ordered copy into |scopedDeviceIds| at the end.
    UInt32 DevicePriorityList[MaxNumberDevices];
    UInt32 scopedDeviceIdsIdx = 0;
    memset(DevicePriorityList, 0, sizeof(DevicePriorityList));
    bool listOK = true;
    WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
        &propertyAddress, 0, NULL, &size, deviceIds));
    if (err != noErr)
    {
        listOK = false;
    } else
    {
        propertyAddress.mSelector = kAudioDevicePropertyStreamConfiguration;
        propertyAddress.mScope = scope;
        propertyAddress.mElement = 0;
        for (UInt32 i = 0; i < numberDevices; i++)
        {
            // Check for input channels
            WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyDataSize(deviceIds[i],
                &propertyAddress, 0, NULL, &size));
            if (err == kAudioHardwareBadDeviceError)
            {
                // This device doesn't actually exist; continue iterating.
                continue;
            } else if (err != noErr)
            {
                listOK = false;
                break;
            }
            bufferList = (AudioBufferList*) malloc(size);
            WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(deviceIds[i],
                &propertyAddress, 0, NULL, &size, bufferList));
            if (err != noErr)
            {
                listOK = false;
                break;
            }
            if (bufferList->mNumberBuffers > 0)
            {
                if (numberScopedDevices >= deviceListLength)
                {
                    WEBRTC_TRACE(kTraceError,
                                 kTraceAudioDevice, _id,
                                 "Device list is not long enough");
                    listOK = false;
                    break;
                }
                if(deviceIds[i] != kAudioDeviceUnknown)
                {
                    size = 0;
                    // Base priority from the transport type (+1 so a later
                    // -1 boost cannot underflow below bucket 0).
                    DevicePriorityList[numberScopedDevices] = 1 + IsLowPriorityDevice(scope, deviceIds[i]);
                    // Probe the OPPOSITE scope: a device that also carries
                    // streams there (e.g. a combined headset) is boosted one
                    // priority bucket.
                    AudioObjectPropertyAddress propertyAddressTemp = {
                        kAudioDevicePropertyStreamConfiguration,
                        (scope == kAudioDevicePropertyScopeOutput) ? kAudioDevicePropertyScopeInput : kAudioDevicePropertyScopeOutput,
                        0 };
                    err = AudioObjectGetPropertyDataSize(deviceIds[i],&propertyAddressTemp, 0, NULL, &size);
                    if (err == noErr && size > 0)
                    {
                        AudioBufferList* bufferListTmp = NULL;
                        bufferListTmp = (AudioBufferList*) malloc(size);
                        if(noErr == AudioObjectGetPropertyData(deviceIds[i],&propertyAddressTemp, 0, NULL, &size, bufferListTmp))
                        {
                            if (bufferListTmp->mNumberBuffers > 0)
                            {
                                DevicePriorityList[numberScopedDevices] -= 1;
                            }
                        }
                        if (bufferListTmp)
                        {
                            free(bufferListTmp);
                        }
                    }
                    WEBRTC_TRACE(kTraceInfo,
                                 kTraceAudioDevice, _id,
                                 "index = %d, DeviceId = %d, scope = %d, priority = %d", i,deviceIds[i],(scope == kAudioDevicePropertyScopeOutput) ? 0 : 1 ,DevicePriorityList[numberScopedDevices]);
                    localScopedDeviceIds[numberScopedDevices] = deviceIds[i];
                    numberScopedDevices++;
                }
            }
            free(bufferList);
            bufferList = NULL;
        } // for
    }
    if (!listOK)
    {
        if (deviceIds)
        {
            free(deviceIds);
            deviceIds = NULL;
        }
        if (bufferList)
        {
            free(bufferList);
            bufferList = NULL;
        }
        return -1;
    }
    // Happy ending
    if (deviceIds)
    {
        free(deviceIds);
        deviceIds = NULL;
    }
    if (numberScopedDevices == 0)
    {
        return 0;
    }
    // Copy devices out bucket by bucket (priority 0 first), preserving the
    // enumeration order within each bucket.
    for (UInt32 DevicePriority = 0; DevicePriority < 5; DevicePriority++)
    {
        for (int i = 0; i < numberScopedDevices; i++)
        {
            if (DevicePriority == DevicePriorityList[i])
            {
                scopedDeviceIds[scopedDeviceIdsIdx] = localScopedDeviceIds[i];
                scopedDeviceIdsIdx++;
            }
        }
    }
    if(bCheckZoomAudioDeviceNum)
    {
        // Count devices whose friendly name matches any of the known
        // Zoom/Blackmagic device name patterns.
        char deviceFriendName[kAdmMaxDeviceNameSize];
        int i = 0;
        for (; i < numberScopedDevices; i++)
        {
            memset(deviceFriendName, 0, sizeof(deviceFriendName));
            GetDeviceFriendName(scope, scopedDeviceIds[i], deviceFriendName);
            if ((strstr(deviceFriendName, ZoomAudioDeviceName2) != 0) || (strstr(deviceFriendName,BlackmagicAudioName) != 0)/* || (strstr(deviceFriendName,MagewellAudioName) != 0)*/
                || (strncmp(deviceFriendName,ZoomAudioDeviceName,strlen(ZoomAudioDeviceName)) == 0))
            {
                ZoomAudioDeviceNum++;
            }
        }
    }
    return numberScopedDevices;
}
  3486. #else
  3487. WebRtc_Word32 AudioDeviceMac::IsLowPriorityDevice(const AudioObjectPropertyScope scope,
  3488. AudioDeviceID DeviceId)
  3489. {
  3490. UInt32 transportType = 0;
  3491. AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyTransportType,scope, 0 };
  3492. UInt32 size = sizeof(UInt32);
  3493. OSStatus err = AudioObjectGetPropertyData(DeviceId,&propertyAddress, 0, NULL, &size, &transportType);
  3494. if (err == noErr)
  3495. {
  3496. if (transportType == 'virt' || transportType == 'ntwk' || transportType == 'airp' || transportType == 0)
  3497. {
  3498. return 1;
  3499. }
  3500. }
  3501. if (scope == kAudioDevicePropertyScopeInput)
  3502. {
  3503. char deviceFriendName[kAdmMaxDeviceNameSize];
  3504. memset(deviceFriendName, 0, sizeof(deviceFriendName));
  3505. const char* BuiltInAudioName = "Built-in Input";
  3506. GetDeviceFriendName(scope, DeviceId, deviceFriendName);
  3507. if (strncmp(deviceFriendName, BuiltInAudioName,strlen(BuiltInAudioName)) == 0)
  3508. {
  3509. return 1;
  3510. }
  3511. }
  3512. return 0;
  3513. }
  3514. WebRtc_Word32
  3515. AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope,
  3516. AudioDeviceID scopedDeviceIds[],
  3517. const WebRtc_UWord32 deviceListLength,
  3518. WebRtc_UWord8 &ZoomAudioDeviceNum,
  3519. bool bCheckZoomAudioDeviceNum)
  3520. {
  3521. ZoomAudioDeviceNum = 0;
  3522. OSStatus err = noErr;
  3523. AudioObjectPropertyAddress propertyAddress = {
  3524. kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
  3525. kAudioObjectPropertyElementMaster };
  3526. UInt32 size = 0;
  3527. WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize(kAudioObjectSystemObject,
  3528. &propertyAddress, 0, NULL, &size));
  3529. if (size == 0)
  3530. {
  3531. WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
  3532. "No devices");
  3533. return 0;
  3534. }
  3535. AudioDeviceID* deviceIds = (AudioDeviceID*) malloc(size);
  3536. UInt32 numberDevices = size / sizeof(AudioDeviceID);
  3537. WEBRTC_TRACE(kTraceWarning,kTraceAudioDevice,_id,"GetNumberDevices, size = %d, numberDevices = %d",size,numberDevices);
  3538. AudioBufferList* bufferList = NULL;
  3539. UInt32 numberScopedDevices = 0;
  3540. AudioDeviceID localScopedDeviceIds[MaxNumberDevices];
  3541. WebRtc_Word32 bLowPriorityDevice[MaxNumberDevices];
  3542. UInt32 scopedDeviceIdsIdx = 0;
  3543. memset(bLowPriorityDevice, 0, sizeof(bLowPriorityDevice));
  3544. // First check if there is a default device and list it
  3545. UInt32 hardwareProperty = 0;
  3546. if (scope == kAudioDevicePropertyScopeOutput)
  3547. {
  3548. hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
  3549. } else
  3550. {
  3551. hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
  3552. }
  3553. AudioObjectPropertyAddress
  3554. propertyAddressDefault = { hardwareProperty,
  3555. kAudioObjectPropertyScopeGlobal,
  3556. kAudioObjectPropertyElementMaster };
  3557. AudioDeviceID usedID = kAudioDeviceUnknown;
  3558. UInt32 uintSize = sizeof(UInt32);
  3559. WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
  3560. &propertyAddressDefault, 0, NULL, &uintSize, &usedID));
  3561. if (usedID != kAudioDeviceUnknown)
  3562. {
  3563. localScopedDeviceIds[numberScopedDevices] = usedID;
  3564. bLowPriorityDevice[numberScopedDevices] = IsLowPriorityDevice(scope, usedID);
  3565. numberScopedDevices++;
  3566. } else
  3567. {
  3568. WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
  3569. "GetNumberDevices(): Default device unknown");
  3570. }
  3571. // Then list the rest of the devices
  3572. bool listOK = true;
  3573. WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
  3574. &propertyAddress, 0, NULL, &size, deviceIds));
  3575. if (err != noErr)
  3576. {
  3577. listOK = false;
  3578. } else
  3579. {
  3580. propertyAddress.mSelector = kAudioDevicePropertyStreamConfiguration;
  3581. propertyAddress.mScope = scope;
  3582. propertyAddress.mElement = 0;
  3583. for (UInt32 i = 0; i < numberDevices; i++)
  3584. {
  3585. WEBRTC_TRACE(kTraceWarning,kTraceAudioDevice,_id,"GetNumberDevices i = %d, id = %d,numberDevices = %d,numberScopedDevices = %d",i,deviceIds[i],numberDevices,numberScopedDevices);
  3586. // Check for input channels
  3587. WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyDataSize(deviceIds[i],
  3588. &propertyAddress, 0, NULL, &size));
  3589. if (err == kAudioHardwareBadDeviceError)
  3590. {
  3591. // This device doesn't actually exist; continue iterating.
  3592. continue;
  3593. } else if (err != noErr)
  3594. {
  3595. listOK = false;
  3596. break;
  3597. }
  3598. bufferList = (AudioBufferList*) malloc(size);
  3599. WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(deviceIds[i],
  3600. &propertyAddress, 0, NULL, &size, bufferList));
  3601. if (err != noErr)
  3602. {
  3603. listOK = false;
  3604. break;
  3605. }
  3606. if (bufferList->mNumberBuffers > 0)
  3607. {
  3608. if (numberScopedDevices >= deviceListLength)
  3609. {
  3610. WEBRTC_TRACE(kTraceError,
  3611. kTraceAudioDevice, _id,
  3612. "Device list is not long enough");
  3613. listOK = false;
  3614. break;
  3615. }
  3616. if((deviceIds[i] != kAudioDeviceUnknown) && (deviceIds[i] != usedID))
  3617. {
  3618. localScopedDeviceIds[numberScopedDevices] = deviceIds[i];
  3619. bLowPriorityDevice[numberScopedDevices] = IsLowPriorityDevice(scope, deviceIds[i]);
  3620. numberScopedDevices++;
  3621. }
  3622. }
  3623. free(bufferList);
  3624. bufferList = NULL;
  3625. } // for
  3626. }
  3627. if (!listOK)
  3628. {
  3629. if (deviceIds)
  3630. {
  3631. free(deviceIds);
  3632. deviceIds = NULL;
  3633. }
  3634. if (bufferList)
  3635. {
  3636. free(bufferList);
  3637. bufferList = NULL;
  3638. }
  3639. return -1;
  3640. }
  3641. // Happy ending
  3642. if (deviceIds)
  3643. {
  3644. free(deviceIds);
  3645. deviceIds = NULL;
  3646. }
  3647. if (numberScopedDevices == 0)
  3648. {
  3649. return 0;
  3650. }
  3651. for (int i = 0; i < numberScopedDevices; i++)
  3652. {
  3653. if (0 == bLowPriorityDevice[i])
  3654. {
  3655. scopedDeviceIds[scopedDeviceIdsIdx] = localScopedDeviceIds[i];
  3656. scopedDeviceIdsIdx++;
  3657. }
  3658. }
  3659. for (int i = 0; i < numberScopedDevices; i++)
  3660. {
  3661. if (1 == bLowPriorityDevice[i])
  3662. {
  3663. scopedDeviceIds[scopedDeviceIdsIdx] = localScopedDeviceIds[i];
  3664. scopedDeviceIdsIdx++;
  3665. }
  3666. }
  3667. if(bCheckZoomAudioDeviceNum)
  3668. {
  3669. char deviceFriendName[kAdmMaxDeviceNameSize];
  3670. int i = 0;
  3671. for (; i < numberScopedDevices; i++)
  3672. {
  3673. memset(deviceFriendName, 0, sizeof(deviceFriendName));
  3674. GetDeviceFriendName(scope, scopedDeviceIds[i], deviceFriendName);
  3675. if (strstr(deviceFriendName, ZoomAudioDeviceName2) != 0 || (strncmp(deviceFriendName,ZoomAudioDeviceName,strlen(ZoomAudioDeviceName)) == 0))
  3676. {
  3677. ZoomAudioDeviceNum++;
  3678. }
  3679. }
  3680. }
  3681. return numberScopedDevices;
  3682. }
  3683. #endif
// Resolves |index| to a device in |scope| and produces a user-visible name
// plus a stable identifier.
//
// index == (WebRtc_UWord16)-1 selects the system default device (falling back
// to index 0 if the default cannot be matched in the enumerated list).
// |name| receives the device name, disambiguated with a "#N" suffix when
// several devices share the same name and, when available, the active data
// source appended as " (source)". |deviceID| receives a copy of the name:
// for built-in ('bltn') devices the copy is taken BEFORE the data-source
// suffix is appended, for all other transports AFTER — the ordering of the
// two memcpy calls below is deliberate.
// Returns 0 on success, -1 on failure.
WebRtc_Word32
AudioDeviceMac::GetDeviceName(const AudioObjectPropertyScope scope,
                              WebRtc_UWord16 index,
                              char* name, char* deviceID)
{
    OSStatus err = noErr;
    UInt32 len = kAdmMaxDeviceNameSize;  // NOTE(review): unused; shadowed by the loop-local |len| below.
    AudioDeviceID deviceIds[MaxNumberDevices];
    char deviceNames[MaxNumberDevices][kAdmMaxDeviceNameSize];
    WebRtc_UWord8 ZoomAudioDeviceNum = 0;
    int numberDevices = GetNumberDevices(scope, deviceIds, MaxNumberDevices,ZoomAudioDeviceNum);
    if (numberDevices < 0)
    {
        return -1;
    } else if (numberDevices == 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "No devices");
        return -1;
    }
    if (index == (WebRtc_UWord16)-1)
    {
        // Caller asked for the system default device; map it to its position
        // in the enumerated list (or fall back to index 0).
        index = 0;
        AudioDeviceID usedID = kAudioDeviceUnknown;
        // Check if there is a default device
        bool isDefaultDevice = false;  // set but only read by the commented-out naming code below
        UInt32 hardwareProperty = 0;
        if (scope == kAudioDevicePropertyScopeOutput)
        {
            hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
        } else
        {
            hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
        }
        AudioObjectPropertyAddress propertyAddress = { hardwareProperty,
                kAudioObjectPropertyScopeGlobal,
                kAudioObjectPropertyElementMaster };
        UInt32 size = sizeof(UInt32);
        WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
            &propertyAddress, 0, NULL, &size, &usedID));
        if (usedID == kAudioDeviceUnknown)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "GetDeviceName(): Default device unknown");
        } else
        {
            for (int i = 0; i < numberDevices; i++)
            {
                if (usedID == deviceIds[i])
                {
                    index = i;
                    break;
                }
            }
            isDefaultDevice = true;
        }
    }
    else if(index >= numberDevices)
    {
        return -1;
    }
    // If the number is below the number of devices, assume it's "WEBRTC ID"
    // otherwise assume it's a CoreAudio ID
    AudioDeviceID usedID = deviceIds[index];
    AudioObjectPropertyAddress propertyAddress = {
        kAudioDevicePropertyDeviceName, scope, 0 };
    // Fetch ALL device names up front: the "#N" duplicate-name suffix below
    // needs the names of every device before |index|.
    for (int i = 0; i < numberDevices; i++)
    {
        UInt32 len = kAdmMaxDeviceNameSize;
        AudioObjectGetPropertyData( deviceIds[i],
            &propertyAddress, 0, NULL, &len, deviceNames[i]);
    }
    /*
    if (isDefaultDevice)
    {
        if (scope == kAudioDevicePropertyScopeOutput)
        {
            sprintf(name, "Default Speaker");
        } else
        {
            sprintf(name, "Default Microphone");
        }
    }
    else
    */
    // Transport type decides when |deviceID| is snapshotted (see below).
    UInt32 transportType = 0;
    AudioObjectPropertyAddress propertyAddresstransportType = { kAudioDevicePropertyTransportType,scope, 0 };
    UInt32 size = sizeof(UInt32);
    AudioObjectGetPropertyData(usedID,&propertyAddresstransportType, 0, NULL, &size, &transportType);
    {
        if (index < numberDevices)
        {
            memcpy(name, deviceNames[index], sizeof(deviceNames[index]));
        }
        // Disambiguate duplicate names: append "#N" where N is the count of
        // earlier devices carrying the same name.
        uint16_t sameDeviceNameCount = 0;
        for (int i = 0; i < index; i++)
        {
            if (strcmp(name,deviceNames[i]) == 0)
            {
                sameDeviceNameCount++;
            }
        }
        if (sameDeviceNameCount != 0)
        {
            char sourceName[128];
            memset(sourceName,0,128);
            sprintf(sourceName,"#%d",sameDeviceNameCount);
            if ((strlen(name) + strlen(sourceName)) < kAdmMaxDeviceNameSize)
            {
                strcat(name,sourceName);
            }
        }
        // Built-in devices: take the identifier BEFORE the data-source suffix
        // is appended, so the id stays stable when the source changes.
        if (transportType == 'bltn')
        {
            memcpy(deviceID,name,kAdmMaxDeviceNameSize);
        }
        // If the device exposes a data source (e.g. "Internal Speakers"),
        // append its human-readable name as " (source)".
        propertyAddress.mSelector = kAudioDevicePropertyDataSource;
        Boolean hasProperty = AudioObjectHasProperty(usedID,
            &propertyAddress);
        if(hasProperty)
        {
            UInt32 dataSource = 0;
            UInt32 size = sizeof(dataSource);
            if(noErr == AudioObjectGetPropertyData(usedID,
                &propertyAddress, 0, NULL, &size, &dataSource))
            {
                // Translate the data-source id to a CFString name.
                AudioValueTranslation trans;
                CFStringRef str = NULL;
                Boolean ok;
                trans.mInputData = &dataSource;
                trans.mInputDataSize = sizeof(UInt32);
                trans.mOutputData = &str;
                trans.mOutputDataSize = sizeof(CFStringRef);
                propertyAddress.mSelector = kAudioDevicePropertyDataSourceNameForIDCFString;
                size = sizeof(AudioValueTranslation);
                if(AudioObjectGetPropertyData(usedID,
                                              &propertyAddress,
                                              0,
                                              NULL,
                                              &size,
                                              &trans)==noErr)
                {
                    char sourceName[128];
                    if(str != NULL && CFStringGetCString(str, sourceName, 128, kCFStringEncodingUTF8))
                    {
                        // +3 accounts for " (", ")" added around the source name.
                        if ((strlen(name) + strlen(sourceName) + 3) < kAdmMaxDeviceNameSize)
                        {
                            strcat(name, " (");
                            strcat(name, sourceName);
                            strcat(name, ")");
                        }
                    }
                }
                if(str)
                    CFRelease(str);
            }
        }
    }
    name[kAdmMaxDeviceNameSize - 1] = '\0';
    // Non-built-in devices: identifier is the full name incl. source suffix.
    if (transportType != 'bltn')
    {
        memcpy(deviceID,name,kAdmMaxDeviceNameSize);
    }
    /// mac change id when usb device plug out and in. so here use name
    /*
    if( deviceID )
    {
        sprintf(deviceID, "%d", (int)usedID);
    }
    */
    return 0;
}
  3856. WebRtc_Word32 AudioDeviceMac::InitDevice(WebRtc_UWord16 userDeviceIndex,
  3857. AudioDeviceID& deviceId,
  3858. const bool isInput)
  3859. {
  3860. OSStatus err = noErr;
  3861. UInt32 size = 0;
  3862. AudioObjectPropertyScope deviceScope;
  3863. AudioObjectPropertySelector defaultDeviceSelector;
  3864. AudioDeviceID deviceIds[MaxNumberDevices];
  3865. if (isInput)
  3866. {
  3867. deviceScope = kAudioDevicePropertyScopeInput;
  3868. defaultDeviceSelector = kAudioHardwarePropertyDefaultInputDevice;
  3869. } else
  3870. {
  3871. deviceScope = kAudioDevicePropertyScopeOutput;
  3872. defaultDeviceSelector = kAudioHardwarePropertyDefaultOutputDevice;
  3873. }
  3874. AudioObjectPropertyAddress
  3875. propertyAddress = { defaultDeviceSelector,
  3876. kAudioObjectPropertyScopeGlobal,
  3877. kAudioObjectPropertyElementMaster };
  3878. /*
  3879. // Get the actual device IDs
  3880. WebRtc_UWord8 ZoomAudioDeviceNum = 0;
  3881. int numberDevices = GetNumberDevices(deviceScope, deviceIds,
  3882. MaxNumberDevices,ZoomAudioDeviceNum);
  3883. if (numberDevices > 0)
  3884. {
  3885. numberDevices = CheckAndRemoveZoomDevice(deviceScope, deviceIds, numberDevices);
  3886. }
  3887. if (numberDevices < 0)
  3888. {
  3889. return -1;
  3890. } else if (numberDevices == 0)
  3891. {
  3892. WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
  3893. "InitDevice(): No devices");
  3894. return -1;
  3895. }
  3896. */
  3897. char deviceGUIDs1[MaxNumberDevices][kAdmMaxDeviceNameSize];
  3898. char deviceNames1[MaxNumberDevices][kAdmMaxDeviceNameSize];
  3899. int numberDevices = MaxNumberDevices;
  3900. AllRelistDevice(deviceScope,deviceNames1,deviceGUIDs1,deviceIds,numberDevices);
  3901. if(numberDevices <= 0)
  3902. {
  3903. return -1;
  3904. }
  3905. bool isDefaultDevice = false;
  3906. deviceId = kAudioDeviceUnknown;
  3907. if (userDeviceIndex == (WebRtc_UWord16)-1)
  3908. {
  3909. userDeviceIndex = 0;
  3910. // Try to use default system device
  3911. size = sizeof(AudioDeviceID);
  3912. WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
  3913. &propertyAddress, 0, NULL, &size, &deviceId));
  3914. if (deviceId == kAudioDeviceUnknown)
  3915. {
  3916. WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
  3917. " No default device exists");
  3918. }
  3919. else
  3920. {
  3921. if (_SystemDefaultSpeakerID != kAudioDeviceUnknown)
  3922. {
  3923. char devName[128];
  3924. memset(devName, 0, sizeof(devName));
  3925. if(GetDeviceFriendName(deviceScope,deviceId,devName))
  3926. {
  3927. if (strncmp(devName,ZoomAudioDeviceName,strlen(ZoomAudioDeviceName)) == 0)
  3928. {
  3929. deviceId = _SystemDefaultSpeakerID;
  3930. }
  3931. }
  3932. }
  3933. isDefaultDevice = true;
  3934. for (int i = 0; i < numberDevices; i++)
  3935. {
  3936. if (deviceId == deviceIds[i])
  3937. {
  3938. userDeviceIndex = i;
  3939. break;
  3940. }
  3941. }
  3942. }
  3943. }
  3944. else if(userDeviceIndex >= numberDevices || userDeviceIndex < 0)
  3945. {
  3946. userDeviceIndex = 0;
  3947. }
  3948. deviceId = deviceIds[userDeviceIndex];
  3949. // Obtain device name and manufacturer for logging.
  3950. // Also use this as a test to ensure a user-set device ID is valid.
  3951. /*
  3952. char devName[128];
  3953. char devManf[128];
  3954. char devGUID[128];
  3955. memset(devName, 0, sizeof(devName));
  3956. memset(devManf, 0, sizeof(devManf));
  3957. memset(devGUID, 0, sizeof(devGUID));
  3958. WebRtc_UWord16 deviceIndex = userDeviceIndex;
  3959. CheckAndIncreaseZoomDevice(deviceScope, numberDevices,deviceIndex);
  3960. GetDeviceName(deviceScope,deviceIndex,devName,devGUID);
  3961. */
  3962. /*
  3963. propertyAddress.mSelector = kAudioDevicePropertyDeviceName;
  3964. propertyAddress.mScope = deviceScope;
  3965. propertyAddress.mElement = 0;
  3966. size = sizeof(devName);
  3967. WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId,
  3968. &propertyAddress, 0, NULL, &size, devName));
  3969. propertyAddress.mSelector = kAudioDevicePropertyDeviceManufacturer;
  3970. size = sizeof(devManf);
  3971. WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId,
  3972. &propertyAddress, 0, NULL, &size, devManf));
  3973. */
  3974. if (isInput)
  3975. {
  3976. memcpy(_inputDevName,deviceNames1[userDeviceIndex],sizeof(_inputDevName));
  3977. memcpy(_inputDevGuid,deviceGUIDs1[userDeviceIndex],sizeof(_inputDevGuid));
  3978. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  3979. " Input device: %s %s", _inputDevName, _inputDevGuid);
  3980. } else
  3981. {
  3982. memcpy(_outputDevName,deviceNames1[userDeviceIndex],sizeof(_outputDevName));
  3983. memcpy(_outputDevGuid,deviceGUIDs1[userDeviceIndex],sizeof(_outputDevGuid));
  3984. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  3985. " Output device: %s %s", _outputDevName, _outputDevGuid);
  3986. }
  3987. return 0;
  3988. }
  3989. OSStatus AudioDeviceMac::objectListenerProc(
  3990. AudioObjectID objectId,
  3991. UInt32 numberAddresses,
  3992. const AudioObjectPropertyAddress addresses[],
  3993. void* clientData)
  3994. {
  3995. AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
  3996. assert(ptrThis != NULL);
  3997. ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses);
  3998. // AudioObjectPropertyListenerProc functions are supposed to return 0
  3999. return 0;
  4000. }
  4001. OSStatus AudioDeviceMac::implObjectListenerProc(
  4002. const AudioObjectID objectId,
  4003. const UInt32 numberAddresses,
  4004. const AudioObjectPropertyAddress addresses[])
  4005. {
  4006. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  4007. "AudioDeviceMac::implObjectListenerProc()");
  4008. for (UInt32 i = 0; i < numberAddresses; i++)
  4009. {
  4010. if (addresses[i].mSelector == kAudioHardwarePropertyDevices)
  4011. {
  4012. HandleDeviceChange();
  4013. } else if (addresses[i].mSelector == kAudioDevicePropertyStreamFormat)
  4014. {
  4015. HandleStreamFormatChange(objectId, addresses[i]);
  4016. } else if (addresses[i].mSelector == kAudioDevicePropertyDataSource)
  4017. {
  4018. HandleDataSourceChange(objectId, addresses[i]);
  4019. } else if (addresses[i].mSelector == kAudioDeviceProcessorOverload)
  4020. {
  4021. HandleProcessorOverload(addresses[i]);
  4022. }
  4023. else if (addresses[i].mSelector == kAudioDevicePropertyVolumeScalar)
  4024. {
  4025. HandleVolumeChange(objectId,addresses[i]);
  4026. }
  4027. }
  4028. return 0;
  4029. }
// kAudioHardwarePropertyDevices listener: a device was added or removed.
// Checks whether the capture and render devices currently in use are still
// alive; a dead device gets its alive-flag cleared (read by the IO threads)
// and its mixer handle closed.
// Returns 0 on success, -1 if querying the alive state fails unexpectedly.
WebRtc_Word32 AudioDeviceMac::HandleDeviceChange()
{
    OSStatus err = noErr;
    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 "kAudioHardwarePropertyDevices");
    // A device has changed. Check if our registered devices have been removed.
    // Ensure the devices have been initialized, meaning the IDs are valid.
    if (MicrophoneIsInitialized())
    {
        AudioObjectPropertyAddress propertyAddress = {
            kAudioDevicePropertyDeviceIsAlive,
            kAudioDevicePropertyScopeInput, 0 };
        UInt32 deviceIsAlive = 1;
        UInt32 size = sizeof(UInt32);
        err = AudioObjectGetPropertyData(_inputDeviceID, &propertyAddress, 0,
                                         NULL, &size, &deviceIsAlive);
        // kAudioHardwareBadDeviceError means the id no longer names a device;
        // deviceIsAlive == 0 means the device is being torn down.
        if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "Capture device is not alive (probably removed)");
            AtomicSet32(&_captureDeviceIsAlive, 0);
            _mixerManager.CloseMicrophone();
            if (_recError == 1)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
                             _id, " pending recording error exists");
            }
            // _recError = 1; // triggers callback from module process thread
        } else if (err != noErr)
        {
            logCAMsg(kTraceError, kTraceAudioDevice, _id,
                     "Error in AudioDeviceGetProperty()", (const char*) &err);
            return -1;
        }
    }
    // Mirror of the capture-side check above, for the render device.
    if (SpeakerIsInitialized())
    {
        AudioObjectPropertyAddress propertyAddress = {
            kAudioDevicePropertyDeviceIsAlive,
            kAudioDevicePropertyScopeOutput, 0 };
        UInt32 deviceIsAlive = 1;
        UInt32 size = sizeof(UInt32);
        err = AudioObjectGetPropertyData(_outputDeviceID, &propertyAddress, 0,
                                         NULL, &size, &deviceIsAlive);
        if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "Render device is not alive (probably removed)");
            AtomicSet32(&_renderDeviceIsAlive, 0);
            _mixerManager.CloseSpeaker();
            if (_playError == 1)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
                             _id, " pending playout error exists");
            }
            // _playError = 1; // triggers callback from module process thread
        } else if (err != noErr)
        {
            logCAMsg(kTraceError, kTraceAudioDevice, _id,
                     "Error in AudioDeviceGetProperty()", (const char*) &err);
            return -1;
        }
    }
    return 0;
}
// kAudioDevicePropertyStreamFormat listener. Re-reads the device's stream
// format and, for whichever direction the notification belongs to, rebuilds
// the sample-format converter, the device IO buffer size, and the cached
// device/stream latency. Unrelated devices are ignored.
// Returns 0 on success (or for an unrelated device), -1 on error.
WebRtc_Word32 AudioDeviceMac::HandleStreamFormatChange(
    const AudioObjectID objectId,
    const AudioObjectPropertyAddress propertyAddress)
{
    OSStatus err = noErr;
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "Stream format changed");
    // Only react to the devices we are actually using.
    if (objectId != _inputDeviceID && objectId != _outputDeviceID)
    {
        return 0;
    }
    // Get the new device format
    AudioStreamBasicDescription streamFormat;
    UInt32 size = sizeof(streamFormat);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(objectId,
        &propertyAddress, 0, NULL, &size, &streamFormat));
    // Only linear PCM with a supported channel count is acceptable.
    if (streamFormat.mFormatID != kAudioFormatLinearPCM)
    {
        logCAMsg(kTraceError, kTraceAudioDevice, _id,
                 "Unacceptable input stream format -> mFormatID",
                 (const char *) &streamFormat.mFormatID);
        return -1;
    }
    if (streamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "Too many channels on device (mChannelsPerFrame = %d)",
                     streamFormat.mChannelsPerFrame);
        return -1;
    }
    // Log the full new format for diagnostics.
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "Stream format:");
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "mSampleRate = %f, mChannelsPerFrame = %u",
                 streamFormat.mSampleRate, streamFormat.mChannelsPerFrame);
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "mBytesPerPacket = %u, mFramesPerPacket = %u",
                 streamFormat.mBytesPerPacket, streamFormat.mFramesPerPacket);
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "mBytesPerFrame = %u, mBitsPerChannel = %u",
                 streamFormat.mBytesPerFrame, streamFormat.mBitsPerChannel);
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "mFormatFlags = %u, mChannelsPerFrame = %u",
                 streamFormat.mFormatFlags, streamFormat.mChannelsPerFrame);
    logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
             (const char *) &streamFormat.mFormatID);
    if (propertyAddress.mScope == kAudioDevicePropertyScopeInput)
    {
        // ---- Capture side ----
        // Serialize against the IO callback and converter users.
        _critSectCb.Enter();
        if (_critSectFormatChange)
        {
            _critSectFormatChange->Enter();
        }
        memcpy(&_inStreamFormat, &streamFormat, sizeof(streamFormat));
#ifdef MUTI_MICROPHONE_SUPPORT
        if(_bMutilChannelsMic)
        {
            // Multi-microphone mode: record all device channels unchanged.
            _inDesiredFormat.mChannelsPerFrame = _inStreamFormat.mChannelsPerFrame;
            _recChannels = _inDesiredFormat.mChannelsPerFrame;
        }
        else
#endif
        {
            // Pick the recording channel layout the engine will see.
            if (_inStreamFormat.mChannelsPerFrame > 2)
            {
                _inDesiredFormat.mChannelsPerFrame = _inStreamFormat.mChannelsPerFrame;
                _recChannels = 1;
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                             "mutil-channel recording on this device");
            }
            else if (_inStreamFormat.mChannelsPerFrame == 2 && (_recChannels == 2))
            {
                _inDesiredFormat.mChannelsPerFrame = 2;
            }
            else
            {
                // Disable stereo recording when we only have one channel on the device.
                _inDesiredFormat.mChannelsPerFrame = 1;
                _recChannels = 1;
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                             "Stereo recording unavailable on this device");
            }
        }
        if (_ptrAudioBuffer)
        {
            // Update audio buffer with the selected parameters
            _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
            _ptrAudioBuffer->SetRecordingChannels((WebRtc_UWord8) _recChannels);
        }
        // The engine consumes packed signed 16-bit PCM.
        _inDesiredFormat.mBytesPerPacket = _inDesiredFormat.mChannelsPerFrame
            * sizeof(SInt16);
        _inDesiredFormat.mBytesPerFrame = _inDesiredFormat.mChannelsPerFrame
            * sizeof(SInt16);
        _inDesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
            | kLinearPCMFormatFlagIsPacked;
#ifdef WEBRTC_BIG_ENDIAN
        _inDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
#endif
        _inDesiredFormat.mFormatID = kAudioFormatLinearPCM;
        // Recreate the converter with the new format
        // TODO(xians): make this thread safe
        WEBRTC_CA_LOG_ERR(AudioConverterDispose(_captureConverter));
        WEBRTC_CA_LOG_ERR(AudioConverterNew(&streamFormat, &_inDesiredFormat,
                                            &_captureConverter));
        if (_critSectFormatChange)
        {
            _critSectFormatChange->Leave();
        }
        // Size the device IO buffer: 10 ms * N_BLOCKS_IO worth of Float32
        // samples, rounded up to whole packets when the device packetizes.
        UInt32 bufByteCount = (UInt32)((_inStreamFormat.mSampleRate / 1000.0)
            * 10.0 * N_BLOCKS_IO * _inStreamFormat.mChannelsPerFrame
            * sizeof(Float32));
        if (_inStreamFormat.mFramesPerPacket != 0)
        {
            if (bufByteCount % _inStreamFormat.mFramesPerPacket != 0)
            {
                bufByteCount = ((UInt32)(bufByteCount
                    / _inStreamFormat.mFramesPerPacket) + 1)
                    * _inStreamFormat.mFramesPerPacket;
            }
        }
        // Ensure the buffer size is within the acceptable range provided by the device.
        // NOTE(review): from here on, WEBRTC_CA_RETURN_ON_ERR returns -1
        // while _critSectCb is still held, leaving it locked on the error
        // path — confirm and restructure.
        AudioObjectPropertyAddress
            propertyAddress = { kAudioDevicePropertyDataSource,kAudioDevicePropertyScopeInput, 0 };
        propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
        AudioValueRange range;
        size = sizeof(range);
        WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
            &propertyAddress, 0, NULL, &size, &range));
        if (range.mMinimum > bufByteCount)
        {
            bufByteCount = range.mMinimum;
        } else if (range.mMaximum < bufByteCount)
        {
            bufByteCount = range.mMaximum;
        }
        propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
        size = sizeof(bufByteCount);
        WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_inputDeviceID,
            &propertyAddress, 0, NULL, size, &bufByteCount));
        // Get capture device latency
        propertyAddress.mSelector = kAudioDevicePropertyLatency;
        UInt32 latency = 0;
        size = sizeof(UInt32);
        WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
            &propertyAddress, 0, NULL, &size, &latency));
        // Convert from frames to microseconds.
        _captureLatencyUs = (UInt32)((1.0e6 * latency)
            / _inStreamFormat.mSampleRate);
#ifdef TRACKDEVICEDELAY
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "capture report delay kAudioDevicePropertyLatency = %d",_captureLatencyUs);
#endif
        // Get capture stream latency
        propertyAddress.mSelector = kAudioDevicePropertyStreams;
        AudioStreamID stream = 0;
        size = sizeof(AudioStreamID);
        WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
            &propertyAddress, 0, NULL, &size, &stream));
        propertyAddress.mSelector = kAudioStreamPropertyLatency;
        size = sizeof(UInt32);
        latency = 0;
        WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
            &propertyAddress, 0, NULL, &size, &latency));
        _captureLatencyUs += (UInt32)((1.0e6 * latency)
            / _inStreamFormat.mSampleRate);
        // A reported latency above 50 ms is treated as bogus and discarded.
        if (_captureLatencyUs/1000 > 50)
        {
            _captureLatencyUs = 0;
        }
#ifdef TRACKDEVICEDELAY
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "capture report delay kAudioStreamPropertyLatency = %d",(UInt32)((1.0e6 * latency)
                     / _inStreamFormat.mSampleRate));
#endif
        _critSectCb.Leave();
    } else
    {
        // ---- Render side ----
        // NOTE(review): _outStreamFormat is written before _critSectCb is
        // taken here, unlike the capture branch above — confirm intentional.
        memcpy(&_outStreamFormat, &streamFormat, sizeof(streamFormat));
        _critSectCb.Enter();
        _critSectPlayFormatChange.Enter();
        if (_outStreamFormat.mChannelsPerFrame >= 2 && (_playChannels == 2))
        {
            _outDesiredFormat.mChannelsPerFrame = 2;
        } else
        {
            // Disable stereo playout when we only have one channel on the device.
            _outDesiredFormat.mChannelsPerFrame = 1;
            _playChannels = 1;
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "Stereo playout unavailable on this device");
        }
        if (_ptrAudioBuffer)
        {
            // Update audio buffer with the selected parameters
            _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
            _ptrAudioBuffer->SetPlayoutChannels((WebRtc_UWord8) _playChannels);
        }
        _renderDelayOffsetSamples = _renderBufSizeSamples - N_BUFFERS_OUT
            * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * _outDesiredFormat.mChannelsPerFrame;
        // The engine produces packed signed 16-bit PCM.
        _outDesiredFormat.mBytesPerPacket = _outDesiredFormat.mChannelsPerFrame
            * sizeof(SInt16);
        _outDesiredFormat.mBytesPerFrame = _outDesiredFormat.mChannelsPerFrame
            * sizeof(SInt16);
        _outDesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
            | kLinearPCMFormatFlagIsPacked;
#ifdef WEBRTC_BIG_ENDIAN
        _outDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
#endif
        _outDesiredFormat.mFormatID = kAudioFormatLinearPCM;
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "play Stream format changed");
        // Recreate the converter with the new format
        // TODO(xians): make this thread safe
        WEBRTC_CA_LOG_ERR(AudioConverterDispose(_renderConverter));
        WEBRTC_CA_LOG_ERR(AudioConverterNew(&_outDesiredFormat, &streamFormat,
                                            &_renderConverter));
        // Size the device IO buffer from the configured playout delay,
        // rounded up to whole packets when the device packetizes.
        UInt32 bufByteCount = (UInt32)((_outStreamFormat.mSampleRate / 1000.0)
            * _playBufDelayFixed * _outStreamFormat.mChannelsPerFrame
            * sizeof(Float32));
        if (_outStreamFormat.mFramesPerPacket != 0)
        {
            if (bufByteCount % _outStreamFormat.mFramesPerPacket != 0)
            {
                bufByteCount = ((UInt32)(bufByteCount
                    / _outStreamFormat.mFramesPerPacket) + 1)
                    * _outStreamFormat.mFramesPerPacket;
            }
        }
        // NOTE(review): the WEBRTC_CA_RETURN_ON_ERR calls below return -1
        // while _critSectCb and _critSectPlayFormatChange are still held —
        // confirm and restructure.
        AudioObjectPropertyAddress
            propertyAddress = { kAudioDevicePropertyDataSource,kAudioDevicePropertyScopeOutput, 0 };
        // Ensure the buffer size is within the acceptable range provided by the device.
        propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
        AudioValueRange range;
        size = sizeof(range);
        WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
            &propertyAddress, 0, NULL, &size, &range));
        if (range.mMinimum > bufByteCount)
        {
            bufByteCount = range.mMinimum;
        } else if (range.mMaximum < bufByteCount)
        {
            bufByteCount = range.mMaximum;
        }
        propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
        size = sizeof(bufByteCount);
        WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_outputDeviceID,
            &propertyAddress, 0, NULL, size, &bufByteCount));
        // Get render device latency
        propertyAddress.mSelector = kAudioDevicePropertyLatency;
        UInt32 latency = 0;
        size = sizeof(UInt32);
        WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
            &propertyAddress, 0, NULL, &size, &latency));
        // Convert from frames to microseconds.
        _renderLatencyUs = (WebRtc_UWord32) ((1.0e6 * latency)
            / _outStreamFormat.mSampleRate);
#ifdef TRACKDEVICEDELAY
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "play report delay kAudioDevicePropertyLatency = %d",_renderLatencyUs);
#endif
        // Get render stream latency
        propertyAddress.mSelector = kAudioDevicePropertyStreams;
        AudioStreamID stream = 0;
        size = sizeof(AudioStreamID);
        WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
            &propertyAddress, 0, NULL, &size, &stream));
        propertyAddress.mSelector = kAudioStreamPropertyLatency;
        size = sizeof(UInt32);
        latency = 0;
        WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
            &propertyAddress, 0, NULL, &size, &latency));
        _renderLatencyUs += (WebRtc_UWord32) ((1.0e6 * latency)
            / _outStreamFormat.mSampleRate);
#ifdef TRACKDEVICEDELAY
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "play report delay kAudioStreamPropertyLatency = %d",(WebRtc_UWord32) ((1.0e6 * latency)
                     / _outStreamFormat.mSampleRate));
#endif
        // A reported latency above 50 ms is treated as bogus and discarded.
        if (_renderLatencyUs/1000 > 50)
        {
            _renderLatencyUs = 0;
        }
        _critSectPlayFormatChange.Leave();
        _critSectCb.Leave();
    }
    return 0;
}
  4379. WebRtc_Word32 AudioDeviceMac::HandleDataSourceChange(
  4380. const AudioObjectID objectId,
  4381. const AudioObjectPropertyAddress propertyAddress)
  4382. {
  4383. OSStatus err = noErr;
  4384. if (_macBookPro && propertyAddress.mScope
  4385. == kAudioDevicePropertyScopeOutput)
  4386. {
  4387. WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
  4388. "Data source changed");
  4389. _macBookProPanRight = false;
  4390. UInt32 dataSource = 0;
  4391. UInt32 size = sizeof(UInt32);
  4392. WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(objectId,
  4393. &propertyAddress, 0, NULL, &size, &dataSource));
  4394. if (dataSource == 'ispk')
  4395. {
  4396. // _macBookProPanRight = true;
  4397. beep();//try to fix the bug that Internal Speaker stop work when plug out the Headphone from audio jack on MacBookPro installed OSX10.8
  4398. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  4399. "MacBook Pro using internal speakers; stereo panning right");
  4400. } else
  4401. {
  4402. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  4403. "MacBook Pro not using internal speakers");
  4404. }
  4405. }
  4406. return 0;
  4407. }
// Handler for kAudioDeviceProcessorOverload notifications. Intentionally a
// no-op on both branches: it runs on the HAL's real-time IO thread, so any
// logging or user notification here would add load exactly when the
// processor is already overloaded. The commented-out code shows what a
// future implementation might report.
WebRtc_Word32 AudioDeviceMac::HandleProcessorOverload(
    const AudioObjectPropertyAddress propertyAddress)
{
    // TODO(xians): we probably want to notify the user in some way of the
    // overload. However, the Windows interpretations of these errors seem to
    // be more severe than what ProcessorOverload is thrown for.
    //
    // We don't log the notification, as it's sent from the HAL's IO thread. We
    // don't want to slow it down even further.
    if (propertyAddress.mScope == kAudioDevicePropertyScopeInput)
    {
        // Capture-side overload; reporting code kept for reference.
        //WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "Capture processor
        // overload");
        //_callback->ProblemIsReported(
        // SndCardStreamObserver::ERecordingProblem);
    } else
    {
        // Render-side overload; reporting code kept for reference.
        //WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
        // "Render processor overload");
        //_callback->ProblemIsReported(
        // SndCardStreamObserver::EPlaybackProblem);
    }
    return 0;
}
  4432. WebRtc_Word32 AudioDeviceMac::HandleVolumeChange(
  4433. const AudioObjectID objectId,
  4434. const AudioObjectPropertyAddress propertyAddress)
  4435. {
  4436. OSStatus err = noErr;
  4437. float volume = 0;
  4438. UInt32 dataSize = sizeof(volume);
  4439. err = AudioObjectGetPropertyData(objectId, &propertyAddress, 0, NULL, &dataSize, &volume);
  4440. if(noErr == err)
  4441. {
  4442. CriticalSectionScoped lock(_critSectNotify);
  4443. if (_pVolumeChangeNotify)
  4444. {
  4445. if (propertyAddress.mScope == kAudioDevicePropertyScopeOutput)
  4446. {
  4447. _pVolumeChangeNotify->OnRenderVolumeChange(volume *255,false,IAudioVolumeChangeNotify::kSystem);
  4448. }
  4449. else if (propertyAddress.mScope == kAudioDevicePropertyScopeInput)
  4450. {
  4451. _pVolumeChangeNotify->OnCaptureVolumeChange(volume *255,false,IAudioVolumeChangeNotify::kSystem);
  4452. }
  4453. }
  4454. }
  4455. return 0;
  4456. }
  4457. // ============================================================================
  4458. // Thread Methods
  4459. // ============================================================================
// HAL IO callback for the (possibly shared) playout device. Runs on
// CoreAudio's real-time IO thread. clientData is the AudioDeviceMac instance
// that registered the proc. Note: this returns -1 from the diagnostic
// branches below even though IOProcs conventionally return 0.
OSStatus AudioDeviceMac::deviceIOProc(AudioDeviceID device, const AudioTimeStamp*,
                                      const AudioBufferList* inputData,
                                      const AudioTimeStamp* inputTime,
                                      AudioBufferList* outputData,
                                      const AudioTimeStamp* outputTime,
                                      void *clientData)
{
#ifdef DEVICE_THREAD_EXCEPTION
    // Protection for the case where the device was stopped but the HAL keeps
    // invoking this callback (see the note at the top of the file).
    // RunSpeakerInfo is a global describing the playout device expected to
    // be running.
    // WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
    // " deviceIOProc device = %d,stopped = %d",device,RunSpeakerInfo.Stopped);
    if (device == RunSpeakerInfo.DeviceID && RunSpeakerInfo.Stopped)
    {
        RunSpeakerInfo.errorCount++;
        // After 50 spurious callbacks, retry stopping and destroying the
        // IOProc (only when the 10.5+ IOProcID API is weak-linked in).
        if(RunSpeakerInfo.errorCount >= 50 && AudioDeviceCreateIOProcID != NULL )
        {
            AudioDeviceStop(RunSpeakerInfo.DeviceID, RunSpeakerInfo.DeviceIOProcID);
            AudioDeviceDestroyIOProcID(RunSpeakerInfo.DeviceID, RunSpeakerInfo.DeviceIOProcID);
        }
        if (RunSpeakerInfo.errorCount >= 150)
        {
            // Deliberate crash: write through a bogus address so the runaway
            // callback shows up in crash reports instead of spinning forever.
            //force crash
            int* ptr = (int*)(RunSpeakerInfo.errorCount);
            *ptr = 0;
        }
        return -1;
    }
    // Callback arrived for a device we do not believe is running.
    if (device != RunSpeakerInfo.DeviceID)
    {
        RunSpeakerInfo.errorCount++;
        if (RunSpeakerInfo.errorCount >= 1024)
        {
            //force crash
            int* ptr = (int*)(RunSpeakerInfo.errorCount);
            *ptr = 0;
        }
        return -1;
    }
#endif
    AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
    assert(ptrThis != NULL);
    // Delegate to the member implementation, which does the actual render
    // (and, for shared devices, stop-handling) work.
    ptrThis->implDeviceIOProc(device,inputData, inputTime, outputData, outputTime);
    // AudioDeviceIOProc functions are supposed to return 0
    return 0;
}
  4504. OSStatus AudioDeviceMac::outConverterProc(AudioConverterRef,
  4505. UInt32 *numberDataPackets,
  4506. AudioBufferList *data,
  4507. AudioStreamPacketDescription **,
  4508. void *userData)
  4509. {
  4510. AudioDeviceMac *ptrThis = (AudioDeviceMac *) userData;
  4511. assert(ptrThis != NULL);
  4512. return ptrThis->implOutConverterProc(numberDataPackets, data);
  4513. }
// HAL IO callback for the capture device. Runs on CoreAudio's real-time IO
// thread. clientData is the AudioDeviceMac instance that registered the
// proc. Unlike deviceIOProc, the diagnostic branches here return 0.
OSStatus AudioDeviceMac::inDeviceIOProc(AudioDeviceID device, const AudioTimeStamp*,
                                        const AudioBufferList* inputData,
                                        const AudioTimeStamp* inputTime,
                                        AudioBufferList*,
                                        const AudioTimeStamp*, void* clientData)
{
#ifdef DEVICE_THREAD_EXCEPTION
    // Protection for the case where the capture device was stopped but the
    // HAL keeps invoking this callback. RunMicrophoneInfo is a global
    // describing the capture device expected to be running.
    // WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
    // " inDeviceIOProc device = %d,stopped = %d",device,RunMicrophoneInfo.Stopped);
    if (device == RunMicrophoneInfo.DeviceID && RunMicrophoneInfo.Stopped)
    {
        RunMicrophoneInfo.errorCount++;
        // After 50 spurious callbacks, retry stopping and destroying the
        // IOProc (only when the 10.5+ IOProcID API is weak-linked in).
        if ( RunMicrophoneInfo.errorCount >= 50 && AudioDeviceCreateIOProcID != NULL )
        {
            AudioDeviceStop(RunMicrophoneInfo.DeviceID, RunMicrophoneInfo.DeviceIOProcID);
            AudioDeviceDestroyIOProcID(RunMicrophoneInfo.DeviceID, RunMicrophoneInfo.DeviceIOProcID);
        }
        if (RunMicrophoneInfo.errorCount >= 151)
        {
            // Deliberate crash: write through a bogus address so the runaway
            // callback shows up in crash reports instead of spinning forever.
            //force crash
            int* ptr = (int *)RunMicrophoneInfo.errorCount;
            *ptr = 0;
        }
        return 0;
    }
    // Callback arrived for a device we do not believe is running.
    if (device != RunMicrophoneInfo.DeviceID)
    {
        RunMicrophoneInfo.errorCount++;
        if (RunMicrophoneInfo.errorCount >= 1025)
        {
            //force crash
            int* ptr = (int *)RunMicrophoneInfo.errorCount;
            *ptr = 0;
        }
        return 0;
    }
#endif
    AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
    assert(ptrThis != NULL);
    // Delegate to the member implementation, which copies the captured audio
    // into the ring buffer and signals the capture worker thread.
    ptrThis->implInDeviceIOProc(device,inputData, inputTime);
    // AudioDeviceIOProc functions are supposed to return 0
    return 0;
}
  4557. OSStatus AudioDeviceMac::inConverterProc(
  4558. AudioConverterRef,
  4559. UInt32 *numberDataPackets,
  4560. AudioBufferList *data,
  4561. AudioStreamPacketDescription ** /*dataPacketDescription*/,
  4562. void *userData)
  4563. {
  4564. AudioDeviceMac *ptrThis = static_cast<AudioDeviceMac*> (userData);
  4565. assert(ptrThis != NULL);
  4566. return ptrThis->implInConverterProc(numberDataPackets, data);
  4567. }
// Member implementation of the playout IO callback. Responsibilities, in
// order: honor a pending stop request (double-checked under _critSect),
// bail out when not playing, pull/convert render data through
// _renderConverter into outputData, recover a repeatedly-failing converter
// by recreating it, and publish the current render delay estimate.
// Runs on CoreAudio's real-time IO thread.
OSStatus AudioDeviceMac::implDeviceIOProc(AudioDeviceID device,const AudioBufferList *inputData,
                                          const AudioTimeStamp *inputTime,
                                          AudioBufferList *outputData,
                                          const AudioTimeStamp *outputTime)
{
    OSStatus err = noErr;
    // Host timestamps converted to nanoseconds; their difference is the
    // time until this buffer actually hits the output.
    UInt64 outputTimeNs = AudioConvertHostTimeToNanos(outputTime->mHostTime);
    UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
    /*
    if (device == _inputDeviceID)
    {
        implInDeviceIOProc(device,inputData, inputTime);
    }
    */
    // Ignore callbacks that are not for our current playout device.
    if (device != _outputDeviceID)
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                     " Error device id not match device = %d,_outputid = %d",device,_outputDeviceID);
        return 0;
    }
    _playCallbackHappened = true;
    // Check if we should close down audio device
    // Double-checked locking optimization to remove locking overhead
    if (_doStop)
    {
        _critSect.Enter();
        if (_doStop)
        {
            {
                // In the case of a shared device, the single driving ioProc
                // is stopped here
                if (AudioDeviceCreateIOProcID != NULL)
                {
                    // 10.5+ path: stop via the registered IOProcID.
                    WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
                    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                                 " implDeviceIOProc AudioDeviceStop _inputDeviceID = %d _outputDeviceID = %d,_deviceIOProcID = %d.",_inputDeviceID,_outputDeviceID,_deviceIOProcID);
                }
                else
                {
                    // Legacy path: stop via the raw proc pointer.
                    WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID,deviceIOProc));
                }
                if (AudioDeviceDestroyIOProcID != NULL)
                {
                    WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_outputDeviceID,
                                                                  _deviceIOProcID));
                }
                else
                {
                    if (err == noErr)
                    {
                        WEBRTC_CA_LOG_WARN(AudioDeviceRemoveIOProc(_outputDeviceID, deviceIOProc));
                    }
                }
                if (err == noErr)
                {
                    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
                                 _id, " Playout or shared device stopped");
                }
            }
            if (err == noErr)
            {
                // Only acknowledge the stop request if everything succeeded;
                // StopPlayout() is blocked on _stopEvent.
                _doStop = false;
                _stopEvent.Set();
            }
            _critSect.Leave();
            return 0;
        }
        _critSect.Leave();
    }
    if (!_playing)
    {
        // This can be the case when a shared device is capturing but not
        // rendering. We allow the checks above before returning to avoid a
        // timeout when capturing is stopped.
        return 0;
    }
    // Held while converting so a concurrent stream-format change cannot swap
    // _renderConverter/_outStreamFormat under us.
    _critSectPlayFormatChange.Enter();
    assert(_outStreamFormat.mBytesPerFrame != 0);
    UInt32 size = outputData->mBuffers->mDataByteSize
        / _outStreamFormat.mBytesPerFrame;
    if (outputData->mBuffers->mNumberChannels != _outStreamFormat.mChannelsPerFrame)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " implDeviceIOProc outputData mNumberBuffers = %d, channelnum = %d, dataByteSize = %d, framenum = %d",outputData->mNumberBuffers, outputData->mBuffers->mNumberChannels, outputData->mBuffers->mDataByteSize, size);
    }
    outputData->mNumberBuffers = 1;
    // TODO(xians): signal an error somehow?
    // Pull converted samples into outputData; outConverterProc feeds the
    // converter from the render ring buffer.
    err = AudioConverterFillComplexBuffer(_renderConverter, outConverterProc,
                                          this, &size, outputData, NULL);
    // Track consecutive conversion failures (zero output counts as failure).
    if (size == 0 || err != noErr)
    {
        _playConvertFailCount += 1;
    }
    else
    {
        _playConvertFailCount = 0;
    }
    if (_playConvertFailCount >= MAXCONVERTFAILEDCOUNT) {
        // The converter appears wedged: recreate it and try again on the
        // next callback.
        AudioConverterDispose(_renderConverter);
        AudioConverterNew(&_outDesiredFormat, &_outStreamFormat,&_renderConverter);
        _critSectPlayFormatChange.Leave();
        _playConvertFailCount = 0;
        return 0;
    }
    if (err != noErr)
    {
        _critSectPlayFormatChange.Leave();
        if (err == 1)
        {
            // This is our own error.
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " Error in AudioConverterFillComplexBuffer()");
            return 1;
        } else
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " Error in AudioConverterFillComplexBuffer() errorcode = %d", err);
            return 0;
        }
    }
    // Render delay = time until this buffer plays out + audio still queued
    // in the render ring buffer, in microseconds.
    ring_buffer_size_t bufSizeSamples =
        PaUtil_GetRingBufferReadAvailable(_paRenderBuffer);
    int32_t renderDelayUs = static_cast<int32_t> (1e-3 * (outputTimeNs - nowNs)
        + 0.5);
    renderDelayUs += static_cast<int32_t> ((1.0e6 * bufSizeSamples)
        / _outDesiredFormat.mChannelsPerFrame / _outDesiredFormat.mSampleRate
        + 0.5);
#ifdef TRACKDEVICEDELAY
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " implDeviceIOProc report delay nowNS = %llu, outputTimeNs = %llu bufSizeSamples = %d,renderDelayUs = %d",nowNs,outputTimeNs, bufSizeSamples,renderDelayUs);
#endif
    // Published atomically for the capture thread's VQE reporting.
    AtomicSet32(&_renderDelayUs, renderDelayUs);
    _critSectPlayFormatChange.Leave();
    return 0;
}
  4703. OSStatus AudioDeviceMac::implOutConverterProc(UInt32 *numberDataPackets,
  4704. AudioBufferList *data)
  4705. {
  4706. assert(data->mNumberBuffers == 1);
  4707. ring_buffer_size_t numSamples = *numberDataPackets
  4708. * _outDesiredFormat.mChannelsPerFrame;
  4709. data->mBuffers->mNumberChannels = _outDesiredFormat.mChannelsPerFrame;
  4710. // Always give the converter as much as it wants, zero padding as required.
  4711. data->mBuffers->mDataByteSize = *numberDataPackets
  4712. * _outDesiredFormat.mBytesPerPacket;
  4713. data->mBuffers->mData = _renderConvertData;
  4714. memset(_renderConvertData, 0, sizeof(_renderConvertData));
  4715. PaUtil_ReadRingBuffer(_paRenderBuffer, _renderConvertData, numSamples);
  4716. kern_return_t kernErr = semaphore_signal_all(_renderSemaphore);
  4717. if (kernErr != KERN_SUCCESS)
  4718. {
  4719. WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
  4720. " semaphore_signal_all() error: %d", kernErr);
  4721. return 1;
  4722. }
  4723. return 0;
  4724. }
// Member implementation of the capture IO callback. Responsibilities, in
// order: honor a pending stop request (double-checked under _critSect),
// bail out when not recording, publish the capture delay estimate, copy the
// captured audio into the PortAudio ring buffer, and wake the capture
// worker thread. Runs on CoreAudio's real-time IO thread.
OSStatus AudioDeviceMac::implInDeviceIOProc(AudioDeviceID device,const AudioBufferList *inputData,
                                            const AudioTimeStamp *inputTime)
{
    OSStatus err = noErr;
    // Host timestamps converted to nanoseconds; (now - inputTime) is how
    // long ago this buffer was actually captured.
    UInt64 inputTimeNs = AudioConvertHostTimeToNanos(inputTime->mHostTime);
    UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
    // Ignore callbacks that are not for our current capture device.
    if (device != _inputDeviceID)
    {
        return 0;
    }
    // Remember the raw timestamps for the capture worker's bookkeeping.
    _InputTimeNs = inputTimeNs;
    _NowTimeNs = nowNs;
    _recordCallbackHappened = true;
    // Check if we should close down audio device
    // Double-checked locking optimization to remove locking overhead
    if (_doStopRec)
    {
        _critSect.Enter();
        if (_doStopRec)
        {
            {
                // In the case of a shared device, the single driving ioProc
                // is stopped here
                if (AudioDeviceCreateIOProcID != NULL )
                {
                    // 10.5+ path: stop via the registered IOProcID.
                    WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
                    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                                 " implInDeviceIOProc AudioDeviceStop _inputDeviceID = %d _outputDeviceID = %d _recSameDevice = %d _deviceIOProcID = %d,_inDeviceIOProcID = %d.",_inputDeviceID,_outputDeviceID,_recSameDevice,_deviceIOProcID,_inDeviceIOProcID);
                }
                else
                {
                    // Legacy path: stop via the raw proc pointer.
                    WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID,inDeviceIOProc));
                }
                if (AudioDeviceDestroyIOProcID != NULL)
                {
                    WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
                }
                else
                {
                    if (err == noErr)
                    {
                        WEBRTC_CA_LOG_WARN(AudioDeviceRemoveIOProc(_inputDeviceID, inDeviceIOProc));
                    }
                }
                if (err == noErr)
                {
                    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
                                 _id, " Recording device stopped");
                }
            }
            if (err == noErr)
            {
                // Only acknowledge the stop request if everything succeeded;
                // StopRecording() is blocked on _stopEventRec.
                _doStopRec = false;
                _stopEventRec.Set();
            }
            _critSect.Leave();
            return 0;
        }
        _critSect.Leave();
    }
    if (!_recording)
    {
        return 0;
    }
    // NOTE(review): dead check — device != _inputDeviceID already returned
    // at the top of this function, so this branch can never be taken.
    if (device != _inputDeviceID)
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                     " Error device id not match device = %d,_inputDeviceID = %d",device,_inputDeviceID);
        return 0;
    }
    // Capture delay = age of this buffer + audio already queued (and not
    // yet consumed) in the capture ring buffer, in microseconds.
    ring_buffer_size_t bufSizeSamples =
        PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer);
    int32_t captureDelayUs = static_cast<int32_t> (1e-3 * (nowNs - inputTimeNs)
        + 0.5);
    captureDelayUs
        += static_cast<int32_t> ((1.0e6 * bufSizeSamples)
        / _inStreamFormat.mChannelsPerFrame / _inStreamFormat.mSampleRate
        + 0.5);
    // Back-date the stored input time by the amount of queued audio so it
    // refers to the oldest sample still in the ring buffer.
    _InputTimeNs -= static_cast<UInt64>((1.0e9 * bufSizeSamples)
        / _inStreamFormat.mChannelsPerFrame / _inStreamFormat.mSampleRate
        + 0.5);
#ifdef TRACKDEVICEDELAY
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " implInDeviceIOProc report delay nowNS = %llu, inputTimeNs = %llu bufSizeSamples = %d,captureDelayUs = %d",nowNs * 1e-6,inputTimeNs * 1e-6, bufSizeSamples,captureDelayUs);
#endif
    // Published atomically for the capture worker thread; the update flag
    // tells it a fresh measurement is available.
    AtomicSet32(&_captureDelayUs, captureDelayUs);
    AtomicSet32(&_captureDelayUsUpdate, 1);
    assert(inputData->mNumberBuffers == 1);
    ring_buffer_size_t numSamples = inputData->mBuffers->mDataByteSize
        * _inStreamFormat.mChannelsPerFrame / _inStreamFormat.mBytesPerPacket;
    PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData,
                           numSamples);
    // Wake the capture worker thread; failure is logged but non-fatal.
    kern_return_t kernErr = semaphore_signal_all(_captureSemaphore);
    if (kernErr != KERN_SUCCESS)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " semaphore_signal_all() error: %d", kernErr);
    }
    return err;
}
// Feeds the capture AudioConverter with samples from the capture ring
// buffer. Two strategies: the multi-channel-mic build copies into a member
// scratch buffer, while the default path hands the converter a pointer
// directly into the ring buffer to avoid a memcpy.
OSStatus AudioDeviceMac::implInConverterProc(UInt32 *numberDataPackets,
                                             AudioBufferList *data)
{
    assert(data->mNumberBuffers == 1);
    ring_buffer_size_t numSamples = *numberDataPackets * _inStreamFormat.mChannelsPerFrame;
#ifdef MUTI_MICROPHONE_SUPPORT
    if (_bMutilChannelsMic)
    {
        // Grow the scratch buffer on demand (never shrinks).
        if (_tmpRecordBufferSize < numSamples)
        {
            if (_pTmpRecordBuffer)
            {
                delete [] _pTmpRecordBuffer;
                _pTmpRecordBuffer = NULL;
            }
            _tmpRecordBufferSize = numSamples;
        }
        if (_pTmpRecordBuffer == NULL)
        {
            _pTmpRecordBuffer = new Float32[numSamples];
        }
        // NOTE(review): if the ring buffer returns fewer than numSamples,
        // mData is left pointing at whatever the caller passed in while the
        // size fields below still claim a full packet count — confirm the
        // caller always pre-points mData somewhere valid.
        if (numSamples == PaUtil_ReadRingBuffer(_paCaptureBuffer, (void*)(_pTmpRecordBuffer), numSamples))
        {
            data->mBuffers->mData = (void*)(_pTmpRecordBuffer);
        }
        data->mBuffers->mNumberChannels = _inStreamFormat.mChannelsPerFrame;
        *numberDataPackets = numSamples / _inStreamFormat.mChannelsPerFrame;
        data->mBuffers->mDataByteSize = *numberDataPackets * _inStreamFormat.mBytesPerPacket;
    }
    else
    {
#endif
    // Pass the read pointer directly to the converter to avoid a memcpy.
    // Only the first contiguous region is used; numSamples is clamped to
    // its size and the read index advanced by the same amount.
    void* dummyPtr;
    ring_buffer_size_t dummySize;
    PaUtil_GetRingBufferReadRegions(_paCaptureBuffer, numSamples,
                                    &data->mBuffers->mData, &numSamples,
                                    &dummyPtr, &dummySize);
    PaUtil_AdvanceRingBufferReadIndex(_paCaptureBuffer, numSamples);
    data->mBuffers->mNumberChannels = _inStreamFormat.mChannelsPerFrame;
    *numberDataPackets = numSamples / _inStreamFormat.mChannelsPerFrame;
    data->mBuffers->mDataByteSize = *numberDataPackets
        * _inStreamFormat.mBytesPerPacket;
#ifdef MUTI_MICROPHONE_SUPPORT
    }
#endif
    return 0;
}
  4873. bool AudioDeviceMac::RunRender(void* ptrThis)
  4874. {
  4875. return static_cast<AudioDeviceMac*> (ptrThis)->RenderWorkerThread();
  4876. }
// Render worker loop (one iteration per call; the thread wrapper re-invokes
// while this returns true). Waits until the render ring buffer has room for
// one 10 ms block, optionally tracks speaker volume, fetches PCM from the
// AudioDeviceBuffer, mixes in loopback capture data, applies MacBook Pro
// pan-right, and writes the block into the ring buffer for the IOProc.
// Returns false only to terminate the thread.
bool AudioDeviceMac::RenderWorkerThread()
{
    ring_buffer_size_t numSamples = ENGINE_PLAY_BUF_SIZE_IN_SAMPLES
        * _outDesiredFormat.mChannelsPerFrame;
    // Block until the IOProc has drained enough of the ring buffer for a
    // full block (signalled from implOutConverterProc).
    while (PaUtil_GetRingBufferWriteAvailable(_paRenderBuffer)
        - _renderDelayOffsetSamples < numSamples)
    {
        mach_timespec_t timeout;
        timeout.tv_sec = 0;
        // NOTE(review): tv_nsec is nanoseconds; TIMER_PERIOD_MS is presumably
        // defined in ns despite the name — confirm against its definition.
        timeout.tv_nsec = TIMER_PERIOD_MS;
        kern_return_t kernErr = semaphore_timedwait(_renderSemaphore, timeout);
        if (kernErr == KERN_OPERATION_TIMED_OUT)
        {
            int32_t signal = AtomicGet32(&_renderDeviceIsAlive);
            if (signal == 0)
            {
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                             " RenderWorkerThread waiting for playout timeout error: %d, exit thread!!!!", kernErr);
                // The render device is no longer alive; stop the worker thread.
                return false;
            }
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " RenderWorkerThread waiting for playout timeout error: %d", kernErr);
            // Count consecutive timeouts; report a no-callback device error
            // once the threshold is reached (only until first success).
            if (_playing && _need_detect_play)
            {
                _playWaitErrorCount++;
                if (_playWaitErrorCount >= N_REC_WAIT_ERROR_COUNT)
                {
                    _playError = DEVICE_ERROR_PLAYBACK_NO_CALLBACK;
                    _playWaitErrorCount = 0;
                    return true;
                }
            }
            return true;
        } else if (kernErr != KERN_SUCCESS)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " semaphore_timedwait() error: %d", kernErr);
            if (_playing)
            {
                _playWaitErrorCount++;
                if (_playWaitErrorCount >= N_REC_WAIT_ERROR_COUNT)
                {
                    _playWaitErrorCount = 0;
                    return true;
                }
            }
        }
        else
        {
            // Successful wait: the device is delivering callbacks, so stop
            // watching for the no-callback condition.
            _need_detect_play = false;
            _playWaitErrorCount = 0;
        }
    }
    // Periodic (every ~21 iterations) speaker-volume check; the computed
    // refinement is currently unused (its consumer is commented out below).
    if (_enableSpkVolumeCheck)
    {
        if (_spkVolumeCheckFreq == 0)
        {
            WebRtc_Word32 refineSpeakerVolumeDB = 0;
            if( 0 != SpeakerVolumeWithDB(_speakerVolumeDB))
            {
                _speakerVolumeDB = 0;
            }
            if (_speakerVolumeDB > (_optVolDB + 4))
            {
                refineSpeakerVolumeDB = (3*(_speakerVolumeDB - _optVolDB + 1))/2;
            }
            else if (_speakerVolumeDB > _optVolDB)
            {
                refineSpeakerVolumeDB = _speakerVolumeDB - _optVolDB;
            }
            else
            {
                refineSpeakerVolumeDB = 0;
            }
            // _ptrAudioBuffer->SetMixerTargetLevelDB(_outputTargetLevelDB - refineSpeakerVolumeDB);
        }
        _spkVolumeCheckFreq++;
        if (_spkVolumeCheckFreq > 20)
        {
            _spkVolumeCheckFreq = 0;
        }
    }
    WebRtc_Word8 playBuffer[4 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
    if (!_ptrAudioBuffer)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " capture AudioBuffer is invalid");
        return false;
    }
    // Ask for new PCM data to be played out using the AudioDeviceBuffer.
    WebRtc_UWord32 nSamples =
        _ptrAudioBuffer->RequestPlayoutData(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);
    nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer);
    if (nSamples != ENGINE_PLAY_BUF_SIZE_IN_SAMPLES)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " invalid number of output samples(%d)", nSamples);
    }
    WebRtc_UWord32 nOutSamples = nSamples * _outDesiredFormat.mChannelsPerFrame;
    SInt16 *pPlayBuffer = (SInt16 *) &playBuffer;
    {
        // Mix one queued frame of loopback capture audio into the playout
        // block, with saturating 16-bit addition. The lock protects the
        // loopback ring of buffers shared with the capture side.
        CriticalSectionScoped lock(_zoomDeviceBufferCritSect);
        if (_loopbackRecording && _loopbackLocalSpeakerPlay)
        {
            /*
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "RenderWorkerThread _loopbackCaptureBufDataReadIndex(%d),nOutSamples(%d)", _loopbackCaptureBufDataReadIndex,nOutSamples);
            */
            if (_loopbackCaptureAvailbaleBufData != 0) {
                WebRtc_Word32 temp(0);
                // fwrite(_ploopbackCaptureBufData[_loopbackCaptureBufDataReadIndex], 2, nOutSamples, _fPCMFile);
                for (int i = 0; i < nOutSamples; i++)
                {
                    // Loopback buffers appear to be stored as stereo: mono
                    // output takes every other (left) sample.
                    if (_outDesiredFormat.mChannelsPerFrame == 2)
                    {
                        temp = pPlayBuffer[i] + _ploopbackCaptureBufData[_loopbackCaptureBufDataReadIndex][i];
                    }
                    else if(_outDesiredFormat.mChannelsPerFrame == 1)
                    {
                        temp = pPlayBuffer[i] + _ploopbackCaptureBufData[_loopbackCaptureBufDataReadIndex][i * 2];
                    }
                    // Saturate to the 16-bit range.
                    if (temp > 32767)
                    {
                        pPlayBuffer[i] = 32767;
                    }
                    else if(temp < -32768)
                    {
                        pPlayBuffer[i] = -32768;
                    }
                    else
                    {
                        pPlayBuffer[i] = (WebRtc_Word16)temp;
                    }
                }
                // Advance the circular read index and consume one frame.
                _loopbackCaptureBufDataReadIndex++;
                if (_loopbackCaptureBufDataReadIndex == MAXLOOPBACKFRAMEBUFNUM)
                {
                    _loopbackCaptureBufDataReadIndex = 0;
                }
                _loopbackCaptureAvailbaleBufData--;
            }
        }
    }
    // When audio sharing is active, also hand this block to the loopback
    // render path.
    if (_bAudioShareStatus)
    {
        _ptrAudioBuffer->SetLoopbackRenderBuffer((const WebRtc_Word8*)pPlayBuffer,nSamples);
        _ptrAudioBuffer->DeliverLoopbackRenderData();
    }
    if (_macBookProPanRight && (_playChannels == 2))
    {
        // Mix entirely into the right channel and zero the left channel.
        SInt32 sampleInt32 = 0;
        for (WebRtc_UWord32 sampleIdx = 0; sampleIdx < nOutSamples; sampleIdx
            += 2)
        {
            sampleInt32 = pPlayBuffer[sampleIdx];
            sampleInt32 += pPlayBuffer[sampleIdx + 1];
            sampleInt32 /= 2;
            if (sampleInt32 > 32767)
            {
                sampleInt32 = 32767;
            } else if (sampleInt32 < -32768)
            {
                sampleInt32 = -32768;
            }
            pPlayBuffer[sampleIdx] = 0;
            pPlayBuffer[sampleIdx + 1] = static_cast<SInt16> (sampleInt32);
        }
    }
    // Queue the finished block for the IOProc to consume.
    PaUtil_WriteRingBuffer(_paRenderBuffer, pPlayBuffer, nOutSamples);
    return true;
}
  5050. bool AudioDeviceMac::RunCapture(void* ptrThis)
  5051. {
  5052. return static_cast<AudioDeviceMac*> (ptrThis)->CaptureWorkerThread();
  5053. }
  5054. int32_t AudioDeviceMac::RegisterAudioDeviceNotify(IAudioDeviceChangeNotify *pNotify)
  5055. {
  5056. _pDeviceChangeNotify = pNotify;
  5057. #ifdef MUTI_MICROPHONE_SUPPORT
  5058. if (_pdeviceNotifier)
  5059. {
  5060. _pdeviceNotifier->Init(_pDeviceChangeNotify);
  5061. }
  5062. #else
  5063. _deviceNotifier.Init(_pDeviceChangeNotify);
  5064. #endif
  5065. return 0;
  5066. }
  5067. int32_t AudioDeviceMac::RegisterAudioVolumeNotify(IAudioVolumeChangeNotify *pNotify)
  5068. {
  5069. CriticalSectionScoped lock(_critSectNotify);
  5070. _pVolumeChangeNotify = pNotify;
  5071. return 0;
  5072. }
  5073. WebRtc_Word32 AudioDeviceMac::MicrophoneSelect(bool& bselected)
  5074. {
  5075. bselected = true;
  5076. return 0;
  5077. }
  5078. bool AudioDeviceMac::CaptureWorkerThread()
  5079. {
  5080. OSStatus err = noErr;
  5081. UInt32 size = ENGINE_REC_BUF_SIZE_IN_SAMPLES;
  5082. ring_buffer_size_t numSamples = size * _inStreamFormat.mChannelsPerFrame;
  5083. if (_paCaptureBuffer == NULL)
  5084. {
  5085. return false;
  5086. }
  5087. while (PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer) < numSamples)
  5088. {
  5089. mach_timespec_t timeout;
  5090. timeout.tv_sec = 0;
  5091. timeout.tv_nsec = TIMER_PERIOD_MS;
  5092. kern_return_t kernErr = semaphore_timedwait(_captureSemaphore, timeout);
  5093. if (kernErr == KERN_OPERATION_TIMED_OUT)
  5094. {
  5095. int32_t signal = AtomicGet32(&_captureDeviceIsAlive);
  5096. if (signal == 0)
  5097. {
  5098. WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
  5099. " CaptureWorkerThread waiting for record timeout error: %d, exit thread!!!!", kernErr);
  5100. return false;
  5101. }
  5102. WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
  5103. " CaptureWorkerThread waiting for record timeout error: %d", kernErr);
  5104. if (_recording && _need_detect)
  5105. {
  5106. _recWaitErrorCount++;
  5107. if (_recWaitErrorCount >= N_REC_WAIT_ERROR_COUNT)
  5108. {
  5109. _recError = DEVICE_ERROR_RECORD_NO_CALLBACK;
  5110. _recWaitErrorCount = 0;
  5111. return true;
  5112. }
  5113. }
  5114. return true;
  5115. } else if (kernErr != KERN_SUCCESS)
  5116. {
  5117. if (_recording)
  5118. {
  5119. _recWaitErrorCount++;
  5120. if (_recWaitErrorCount >= N_REC_WAIT_ERROR_COUNT)
  5121. {
  5122. _recWaitErrorCount = 0;
  5123. return true;
  5124. }
  5125. }
  5126. WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
  5127. " semaphore_wait() error: %d", kernErr);
  5128. }
  5129. else
  5130. {
  5131. _need_detect = false;
  5132. _recWaitErrorCount = 0;
  5133. }
  5134. }
  5135. // WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " CaptureWorkerThread() aaa ");
  5136. if (_critSectFormatChange)
  5137. {
  5138. _critSectFormatChange->Enter();
  5139. }
  5140. UInt32 noRecSamples = ENGINE_REC_BUF_SIZE_IN_SAMPLES
  5141. * _inDesiredFormat.mChannelsPerFrame;
  5142. SInt16 recordBuffer[noRecSamples*4];
  5143. AudioBufferList engineBuffer;
  5144. engineBuffer.mNumberBuffers = 1; // Interleaved channels.
  5145. engineBuffer.mBuffers->mNumberChannels = _inDesiredFormat.mChannelsPerFrame;
  5146. engineBuffer.mBuffers->mDataByteSize = noRecSamples*4*sizeof(SInt16);
  5147. engineBuffer.mBuffers->mData = recordBuffer;
  5148. err = AudioConverterFillComplexBuffer(_captureConverter, inConverterProc,
  5149. this, &size, &engineBuffer, NULL);
  5150. if (size != ENGINE_REC_BUF_SIZE_IN_SAMPLES)
  5151. {
  5152. _capConvertFailCount += 1;
  5153. }
  5154. else
  5155. {
  5156. _capConvertFailCount = 0;
  5157. }
  5158. if (_capConvertFailCount >= MAXCONVERTFAILEDCOUNT) {
  5159. WEBRTC_CA_LOG_ERR(AudioConverterDispose(_captureConverter));
  5160. WEBRTC_CA_LOG_ERR(AudioConverterNew(&_inStreamFormat, &_inDesiredFormat,
  5161. &_captureConverter));
  5162. _capConvertFailCount = 0;
  5163. }
  5164. if (_critSectFormatChange)
  5165. {
  5166. _critSectFormatChange->Leave();
  5167. }
  5168. if (err != noErr)
  5169. {
  5170. if (err == 1)
  5171. {
  5172. // This is our own error.
  5173. return false;
  5174. } else
  5175. {
  5176. logCAMsg(kTraceError, kTraceAudioDevice, _id,
  5177. "Error in AudioConverterFillComplexBuffer()",
  5178. (const char *) &err);
  5179. return false;
  5180. }
  5181. }
  5182. if((_inDesiredFormat.mChannelsPerFrame > 2) && size == ENGINE_REC_BUF_SIZE_IN_SAMPLES && _recChannels == 1)
  5183. {
  5184. WebRtc_Word16* audio16ptr = (WebRtc_Word16*) recordBuffer;
  5185. for (WebRtc_UWord32 i = 0; i < ENGINE_REC_BUF_SIZE_IN_SAMPLES; i++)
  5186. {
  5187. WebRtc_Word32 audio32 = 0;
  5188. for (WebRtc_UWord32 j = 0; j < _inDesiredFormat.mChannelsPerFrame; j++)
  5189. {
  5190. audio32 += audio16ptr[(_inDesiredFormat.mChannelsPerFrame)*i + j];
  5191. }
  5192. if (audio32 > 32767)
  5193. {
  5194. audio32 = 32767;
  5195. }else if(audio32 < -32768)
  5196. {
  5197. audio32 = -32768;
  5198. }
  5199. recordBuffer[i] = static_cast<WebRtc_Word16> (audio32);
  5200. }
  5201. }
  5202. // TODO(xians): what if the returned size is incorrect?
  5203. if (size == ENGINE_REC_BUF_SIZE_IN_SAMPLES)
  5204. {
  5205. WebRtc_UWord32 currentMicLevel(0);
  5206. WebRtc_UWord32 newMicLevel(0);
  5207. WebRtc_Word32 msecOnPlaySide;
  5208. WebRtc_Word32 msecOnRecordSide;
  5209. int32_t captureDelayUs = AtomicGet32(&_captureDelayUs);
  5210. int32_t renderDelayUs = AtomicGet32(&_renderDelayUs);
  5211. #ifdef CHECKTIMESTAMPERROR
  5212. if (_bCheckTimestampError)
  5213. {
  5214. if (captureDelayUs < 0 || renderDelayUs < 0 || (1e-3 * captureDelayUs) > 5000 || (1e-3 * renderDelayUs) > 5000)
  5215. {
  5216. _timestampErrorCount++;
  5217. }
  5218. else
  5219. {
  5220. _timestampErrorCount = 0;
  5221. }
  5222. if (_timestampErrorCount > 3000)
  5223. {
  5224. _recError = DEVICE_ERROR_TIMESTAMP_EXCEPTION;
  5225. // resetCoreAudioService();
  5226. _bCheckTimestampError = false;
  5227. }
  5228. }
  5229. #endif
  5230. int32_t captureDelayUsUpdate = AtomicGet32(&_captureDelayUsUpdate);
  5231. //Add by Siping
  5232. if (captureDelayUsUpdate == 1)
  5233. {
  5234. AtomicSet32(&_captureDelayUsUpdate, 0);
  5235. _captureDelayUsPrevious = captureDelayUs;
  5236. if (_InputTimeNs < _NowTimeNs)
  5237. {
  5238. _recDataInputTimeNs = _InputTimeNs;
  5239. }
  5240. else
  5241. {
  5242. _recDataInputTimeNs += 10e6;
  5243. }
  5244. }
  5245. else
  5246. {
  5247. _captureDelayUsPrevious = _captureDelayUsPrevious - 10000;
  5248. if (_captureDelayUsPrevious < 0)
  5249. _captureDelayUsPrevious = 0;
  5250. captureDelayUs = _captureDelayUsPrevious;
  5251. _recDataInputTimeNs += 10e6;
  5252. }
  5253. _msecOnPlaySide = msecOnPlaySide = static_cast<WebRtc_Word32> (1e-3 * (renderDelayUs
  5254. + _renderLatencyUs) + 0.5);
  5255. _msecOnRecordSide = msecOnRecordSide = static_cast<WebRtc_Word32> (1e-3 * (captureDelayUs
  5256. + _captureLatencyUs) + 0.5);
  5257. #ifdef TRACKDEVICEDELAY
  5258. WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
  5259. " CaptureWorkerThread report delay msecOnPlaySide = %d,msecOnRecordSide = %d",msecOnPlaySide,msecOnRecordSide);
  5260. #endif
  5261. if (!_ptrAudioBuffer)
  5262. {
  5263. WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
  5264. " capture AudioBuffer is invalid");
  5265. return false;
  5266. }
  5267. // store the recorded buffer (no action will be taken if the
  5268. // #recorded samples is not a full buffer)
  5269. _ptrAudioBuffer->SetRecordedBuffer((WebRtc_Word8*) &recordBuffer,
  5270. (WebRtc_UWord32) size);
  5271. if (AGC())
  5272. {
  5273. // store current mic level in the audio buffer if AGC is enabled
  5274. if (MicrophoneVolume(currentMicLevel) == 0)
  5275. {
  5276. // this call does not affect the actual microphone volume
  5277. _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
  5278. }
  5279. }
  5280. _ptrAudioBuffer->SetVQEData(msecOnPlaySide, msecOnRecordSide, 0);
  5281. // deliver recorded samples at specified sample rate, mic level etc.
  5282. // to the observer using callback
  5283. _ptrAudioBuffer->DeliverRecordedData();
  5284. if (AGC())
  5285. {
  5286. newMicLevel = _ptrAudioBuffer->NewMicLevel();
  5287. if (newMicLevel != 0)
  5288. {
  5289. // The VQE will only deliver non-zero microphone levels when
  5290. // a change is needed.
  5291. // Set this new mic level (received from the observer as return
  5292. // value in the callback).
  5293. WEBRTC_TRACE(kTraceStream, kTraceAudioDevice,
  5294. _id, " AGC change of volume: old=%u => new=%u",
  5295. currentMicLevel, newMicLevel);
  5296. if (SetMicrophoneVolume(newMicLevel) == -1)
  5297. {
  5298. WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
  5299. " the required modification of the microphone "
  5300. "volume failed");
  5301. }
  5302. }
  5303. }
  5304. }
  5305. return true;
  5306. }
  5307. WebRtc_Word32 AudioDeviceMac::SetLoopbackRecordDevice(WebRtc_UWord16 index)
  5308. {
  5309. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  5310. "AudioDeviceMac::SetLoopbackRecordDevice(index=%u)", index);
  5311. if (_loopbackDeviceIsInitialized)
  5312. {
  5313. return -1;
  5314. }
  5315. AudioDeviceID playDevices[MaxNumberDevices];
  5316. WebRtc_UWord8 ZoomAudioDeviceNum = 0;
  5317. WebRtc_UWord32 nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput,
  5318. playDevices, MaxNumberDevices,ZoomAudioDeviceNum);
  5319. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  5320. "SetLoopbackRecordDevice number of availiable waveform-audio output devices is %u",
  5321. nDevices);
  5322. if (index > (nDevices - 1))
  5323. {
  5324. WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
  5325. "SetLoopbackRecordDevice device index is out of range [0,%u]", (nDevices - 1));
  5326. return -1;
  5327. }
  5328. _loopbackDeviceIndex = index;
  5329. _loopbackDeviceIsSpecified = true;
  5330. return 0;
  5331. }
  5332. WebRtc_Word32 AudioDeviceMac::SetLoopbackRecordDevice(AudioDeviceModule::WindowsDeviceType device)
  5333. {
  5334. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  5335. "SetLoopbackRecordDevice WindowsDeviceType not supported");
  5336. if(0 == SetLoopbackRecordDevice(0))
  5337. {
  5338. return 0;
  5339. }
  5340. else
  5341. {
  5342. return -1;
  5343. }
  5344. return 0;
  5345. }
  5346. int32_t AudioDeviceMac::on_outer_audio_data(const char* audioSamples,
  5347. const uint32_t nSamples,
  5348. const uint8_t nBytesPerSample,
  5349. const uint8_t nChannels,
  5350. const uint32_t samplesPerSec)
  5351. {
  5352. #ifdef BUILD_FOR_MIMO
  5353. CriticalSectionScoped lock(_loopbackCritSect);
  5354. if (_LoopBackDeviceSource != kExtraSource)
  5355. {
  5356. return 0;
  5357. }
  5358. if (_loopbackRecording)
  5359. {
  5360. ring_buffer_size_t bufSizeSamples =
  5361. PaUtil_GetRingBufferReadAvailable(_paLoopbackCaptureBuffer);
  5362. ring_buffer_size_t numSamples =nSamples/nBytesPerSample;
  5363. PaUtil_WriteRingBuffer(_paLoopbackCaptureBuffer, audioSamples, numSamples);
  5364. kern_return_t kernErr = semaphore_signal_all(_loopbackCaptureSemaphore);
  5365. if (kernErr != KERN_SUCCESS)
  5366. {
  5367. WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
  5368. "on_outer_audio_data semaphore_signal_all() error: %d", kernErr);
  5369. }
  5370. }
  5371. return 0;
  5372. #else
  5373. return 0;
  5374. #endif
  5375. }
// Selects which source feeds the loopback capture path (e.g. the virtual
// device vs. externally supplied audio via on_outer_audio_data()). Only
// meaningful in the BUILD_FOR_MIMO build; a no-op otherwise.
// Always returns 0.
int32_t AudioDeviceMac::SetLoopbackDeviceSource(const LoopbackDeviceSourceType DeviceSource)
{
#ifdef BUILD_FOR_MIMO
    // Guard shared loopback state while swapping the source selector.
    CriticalSectionScoped lock(_loopbackCritSect);
    _LoopBackDeviceSource = DeviceSource;
#endif
    return 0;
}
  5384. #ifdef BUILD_FOR_MIMO
// BM/MIMO build: loopback audio is supplied externally through
// on_outer_audio_data(), so there is no CoreAudio loopback device to set up.
// Always succeeds.
WebRtc_Word32 AudioDeviceMac::InitLoopbackDeviceBM()
{
    return 0;
}
// Prepares loopback ("audio share") recording for the BM/MIMO build:
// initializes the loopback device, publishes the loopback sample-rate and
// channel configuration to the shared audio buffer, and marks recording as
// initialized. The capture buffer itself is allocated lazily in
// StartLoopbackRecordingBM().
//
// @return 0 on success (or if already initialized); -1 if loopback
//         recording is currently running.
WebRtc_Word32 AudioDeviceMac::InitLoopbackRecordingBM()
{
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "%s", __FUNCTION__);
    CriticalSectionScoped lock(_loopbackCritSect);
    // Cannot (re)initialize while a loopback recording is active.
    if (_loopbackRecording)
    {
        return -1;
    }
    // Already initialized: nothing to do.
    if (_loopbackRecIsInitialized)
    {
        return 0;
    }
    // Initialize the loopback (devices might have been added or removed)
    // Failure is only logged; initialization proceeds regardless.
    if (InitLoopbackDevice() == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     " InitLoopbackDevice() failed");
    }
    _doStopLoopbackRec = false;
    if (_ptrAudioBuffer)
    {
        // Update audio buffer with the selected parameters
        _ptrAudioBuffer->SetLoopbackRecSampleRate(N_REC_SAMPLES_PER_SEC);
        _ptrAudioBuffer->SetLoopbackRecChannels((WebRtc_UWord8) _loopbackRecChannels);
        _ptrAudioBuffer->SetLoopbackRenderSampleRate(N_PLAY_SAMPLES_PER_SEC);
        _ptrAudioBuffer->SetLoopbackRenderChannels((WebRtc_UWord8)_playChannels);
    }
    // Buffer is allocated on demand by StartLoopbackRecordingBM().
    _loopbackCaptureBufDataBM = NULL;
    _loopbackRecIsInitialized = true;
    return 0;
}
  5421. WebRtc_Word32 AudioDeviceMac::StartLoopbackRecordingBM()
  5422. {
  5423. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  5424. "%s", __FUNCTION__);
  5425. CriticalSectionScoped lock(_loopbackCritSect);
  5426. if (!_loopbackRecIsInitialized)
  5427. {
  5428. return -1;
  5429. }
  5430. if (_loopbackRecording)
  5431. {
  5432. return 0;
  5433. }
  5434. if (!_initialized)
  5435. {
  5436. WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
  5437. " loopback Recording worker thread has not been started");
  5438. return -1;
  5439. }
  5440. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  5441. " StartLoopbackRecordingBM _loopbackCaptureBufDataBM = %d,",_loopbackCaptureBufDataBM);
  5442. if (_loopbackCaptureBufDataBM == NULL)
  5443. {
  5444. UInt32 powerOfTwo = 1;
  5445. while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES)
  5446. {
  5447. powerOfTwo <<= 1;
  5448. }
  5449. _loopbackCaptureBufSizeSamples = powerOfTwo;
  5450. _loopbackCaptureBufDataBM = new int16_t[_loopbackCaptureBufSizeSamples];
  5451. }
  5452. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  5453. " StartLoopbackRecordingBM _loopbackCaptureBufSizeSamples = %d,",_loopbackCaptureBufSizeSamples);
  5454. if (_paLoopbackCaptureBuffer == NULL)
  5455. {
  5456. _paLoopbackCaptureBuffer = new PaUtilRingBuffer;
  5457. ring_buffer_size_t bufSize = -1;
  5458. bufSize = PaUtil_InitializeRingBuffer(_paLoopbackCaptureBuffer,
  5459. sizeof(int16_t),
  5460. _loopbackCaptureBufSizeSamples,
  5461. _loopbackCaptureBufDataBM);
  5462. if (bufSize == -1)
  5463. {
  5464. WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
  5465. _id, "StartLoopbackRecording PaUtil_InitializeRingBuffer() error");
  5466. return -1;
  5467. }
  5468. }
  5469. PaUtil_FlushRingBuffer(_paLoopbackCaptureBuffer);
  5470. _loopbackCaptureBufDataReadIndex = 0;
  5471. _loopbackCaptureBufDataWriteIndex = 0;
  5472. _loopbackCaptureAvailbaleBufData = 0;
  5473. if (_loopbackCaptureWorkerThread == NULL)
  5474. {
  5475. _loopbackCaptureWorkerThread
  5476. = ThreadWrapper::CreateThread(RunLoopbackCapture, this, kRealtimePriority,
  5477. "LoopbackCaptureWorkerThread");
  5478. if (_loopbackCaptureWorkerThread == NULL)
  5479. {
  5480. WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
  5481. _id, " loopback Capture CreateThread() error");
  5482. return -1;
  5483. }
  5484. }
  5485. kern_return_t kernErr = KERN_SUCCESS;
  5486. kernErr = semaphore_create(mach_task_self(), &_loopbackCaptureSemaphore,
  5487. SYNC_POLICY_FIFO, 0);
  5488. if (kernErr != KERN_SUCCESS)
  5489. {
  5490. WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
  5491. " loopback semaphore_create() error: %d", kernErr);
  5492. return -1;
  5493. }
  5494. OSStatus err = noErr;
  5495. unsigned int threadID(0);
  5496. if (_loopbackCaptureWorkerThread != NULL)
  5497. {
  5498. _loopbackCaptureWorkerThread->Start(threadID);
  5499. }
  5500. _loopbackCaptureWorkerThreadId = threadID;
  5501. _loopbackRecording = true;
  5502. if (_ptrAudioBuffer)
  5503. {
  5504. if (_ptrAudioBuffer->SetAudioShareStatus(true) == 0)
  5505. {
  5506. _bAudioShareStatus = true;
  5507. }
  5508. }
  5509. if (_playing)
  5510. {
  5511. bool curSpeakerMuteStatus = false;
  5512. SpeakerMute(curSpeakerMuteStatus);
  5513. if (curSpeakerMuteStatus)
  5514. {
  5515. SetSpeakerMute(false);
  5516. }
  5517. /*
  5518. WebRtc_UWord32 curSpeakerVolume = 0;
  5519. SpeakerVolume(curSpeakerVolume);
  5520. if (curSpeakerVolume < 100)
  5521. {
  5522. SetSpeakerVolume(100);
  5523. }
  5524. */
  5525. }
  5526. /*
  5527. char name[128];
  5528. sprintf(name, "/Users/matt/Library/Logs/zoom.us/audio/loopbackrecord%p.pcm",this);
  5529. _fPCMFile = fopen(name, "wb");
  5530. sprintf(name, "/Users/matt/Library/Logs/zoom.us/audio/loopbackrecordorg%p.pcm",this);
  5531. _fPCMFileOrg = fopen(name, "wb");
  5532. */
  5533. return 0;
  5534. }
// Stops loopback ("audio share") recording for the BM/MIMO build: joins and
// destroys the capture worker thread, frees the ring buffer and its backing
// store, destroys the wake-up semaphore, and clears the status flags.
// Returns 0 (also when recording was never initialized).
WebRtc_Word32 AudioDeviceMac::StopLoopbackRecordingBM()
{
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "%s", __FUNCTION__);
    CriticalSectionScoped lock(_loopbackCritSect);
    if (!_loopbackRecIsInitialized)
    {
        return 0;
    }
    // The worker thread takes _loopbackCritSect itself, so drop the lock
    // while joining it to avoid deadlock; re-acquired via Enter() below and
    // released again by the scoped lock's destructor.
    _loopbackCritSect.Leave();
    if (_loopbackCaptureWorkerThread != NULL)
    {
        if (!_loopbackCaptureWorkerThread->Stop())
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " Timed out waiting for the loopback caputre worker thread to "
                         "stop.");
        }
    }
    _loopbackCritSect.Enter();
    // Free the ring buffer's backing store and the ring buffer struct.
    if (_loopbackCaptureBufDataBM)
    {
        delete[] _loopbackCaptureBufDataBM;
        _loopbackCaptureBufDataBM = NULL;
    }
    if (_paLoopbackCaptureBuffer)
    {
        delete _paLoopbackCaptureBuffer;
        _paLoopbackCaptureBuffer = NULL;
    }
    // Destroy the semaphore used to wake the worker thread.
    kern_return_t kernErr = KERN_SUCCESS;
    kernErr = semaphore_destroy(mach_task_self(), _loopbackCaptureSemaphore);
    if (kernErr != KERN_SUCCESS)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " StopLoopbackRecording semaphore_destroy() error: %d", kernErr);
    }
    if (_loopbackCaptureWorkerThread)
    {
        delete _loopbackCaptureWorkerThread;
        _loopbackCaptureWorkerThread = NULL;
    }
    _loopbackRecIsInitialized = false;
    _loopbackRecording = false;
    // Inform the audio buffer that audio share has ended.
    if (_ptrAudioBuffer)
    {
        if (_ptrAudioBuffer->SetAudioShareStatus(false) == 0)
        {
            _bAudioShareStatus = false;
        }
    }
    /*
    fclose(_fPCMFile);
    fclose(_fPCMFileOrg);
    */
    return 0;
}
  5592. #endif
  5593. WebRtc_Word32 AudioDeviceMac::InitLoopbackDevice()
  5594. {
  5595. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
  5596. "%s", __FUNCTION__);
  5597. #ifdef BUILD_FOR_MIMO
  5598. if (_LoopBackDeviceSource == kExtraSource)
  5599. {
  5600. return InitLoopbackDeviceBM();
  5601. }
  5602. #endif
  5603. CriticalSectionScoped lock(_loopbackCritSect);
  5604. CheckAndReplaceZoomDevice();
  5605. if (!ZoomAudioDeviceCheck())
  5606. {
  5607. return -1;
  5608. }
  5609. if (_loopbackRecording)
  5610. {
  5611. return -1;
  5612. }
  5613. return 0;
  5614. }
// Initializes loopback ("audio share") recording via the virtual Zoom
// device: redirects the system default output to the Zoom speaker endpoint,
// configures mixer levels, queries the Zoom mic endpoint's stream format,
// sets up a converter to the desired 16-bit PCM format, sizes the device
// I/O buffer, registers property listeners, and installs the loopback
// IOProc. On any failure after the redirection, the original system default
// output device is restored before returning.
//
// @return 0 on success (or if already initialized), -1 on failure.
WebRtc_Word32 AudioDeviceMac::InitLoopbackRecording()
{
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "%s", __FUNCTION__);
#ifdef BUILD_FOR_MIMO
    // Externally-fed source uses the simpler BM path.
    if (_LoopBackDeviceSource == kExtraSource)
    {
        return InitLoopbackRecordingBM();
    }
#endif
    CriticalSectionScoped lock(_loopbackCritSect);
    if (_loopbackRecording)
    {
        return -1;
    }
    if (_loopbackRecIsInitialized)
    {
        return 0;
    }
    // Initialize the loopback (devices might have been added or removed)
    // Failure is only logged; the steps below perform their own checks.
    if (InitLoopbackDevice() == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     " InitLoopbackDevice() failed");
    }
    // Remember the current system default output so it can be restored when
    // loopback recording stops (or if setup fails below).
    if (!GetSystemDefaultPlayDevice(_SystemDefaultSpeakerID))
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     " InitLoopbackDevice() _SystemDefaultSpeakerID = %d",_SystemDefaultSpeakerID);
        return -1;
    }
    setZoomAudioDeviceProperty(true);
    // Route system output through the virtual Zoom speaker; roll back the
    // default device and the device property on failure.
    if (!SetSystemDefaultPlayDevice(_zoomDeviceSpeakerID))
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     " InitLoopbackDevice() _zoomDeviceSpeakerID = %d",_zoomDeviceSpeakerID);
        SetSystemDefaultPlayDevice(_SystemDefaultSpeakerID);
        _SystemDefaultSpeakerID = kAudioObjectUnknown;
        setZoomAudioDeviceProperty(false);
        return -1;
    }
    // Verify the redirection actually took effect.
    AudioDeviceID nowDefaultPlayDevice = kAudioDeviceUnknown;
    if (!GetSystemDefaultPlayDevice(nowDefaultPlayDevice))
    {
        SetSystemDefaultPlayDevice(_SystemDefaultSpeakerID);
        _SystemDefaultSpeakerID = kAudioObjectUnknown;
        setZoomAudioDeviceProperty(false);
        return -1;
    }
    if(_zoomDeviceSpeakerID != nowDefaultPlayDevice)
    {
        SetSystemDefaultPlayDevice(_SystemDefaultSpeakerID);
        _SystemDefaultSpeakerID = kAudioObjectUnknown;
        setZoomAudioDeviceProperty(false);
        return -1;
    }
    // Drive the virtual endpoints at full level, unmuted, so the captured
    // signal is not attenuated by mixer settings.
    if(_zoomDeviceMicID != kAudioObjectUnknown)
    {
        _mixerManager.SetMicrophoneMute(false,_zoomDeviceMicID,2);
        _mixerManager.SetMicrophoneVolume(255,_zoomDeviceMicID,2,true);
    }
    if(_zoomDeviceSpeakerID != kAudioObjectUnknown)
    {
        _mixerManager.SpeakerMute(_bDefaultSpeakerIsMuted,_SystemDefaultSpeakerID);
        _mixerManager.SetSpeakerVolume(255,true,_zoomDeviceSpeakerID,2,true);
    }
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 " InitLoopbackDevice() _zoomDeviceSpeakerID = %d,_SystemDefaultSpeakerID = %d",_zoomDeviceSpeakerID,_SystemDefaultSpeakerID);
    OSStatus err = noErr;
    UInt32 size = 0;
    _loopbackCaptureDeviceIsAlive = 1;
    _doStopLoopbackRec = false;
    // Get current stream description
    AudioObjectPropertyAddress
        propertyAddress = { kAudioDevicePropertyStreamFormat,
            kAudioDevicePropertyScopeInput, 0 };
    memset(&_loopbackStreamFormat, 0, sizeof(_loopbackStreamFormat));
    size = sizeof(_loopbackStreamFormat);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_zoomDeviceMicID,
        &propertyAddress, 0, NULL, &size, &_loopbackStreamFormat));
    // Only linear PCM devices are supported.
    if (_loopbackStreamFormat.mFormatID != kAudioFormatLinearPCM)
    {
        logCAMsg(kTraceError, kTraceAudioDevice, _id,
                 "Unacceptable _loopbackStream stream format -> mFormatID",
                 (const char *) &_loopbackStreamFormat.mFormatID);
        return -1;
    }
    if (_loopbackStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     ", Too many loopback channels on device (mChannelsPerFrame = %d)",
                     _loopbackStreamFormat.mChannelsPerFrame);
        return -1;
    }
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "InitLoopbackDevice loopback stream format:");
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "InitLoopbackDevice mSampleRate = %f, mChannelsPerFrame = %u",
                 _loopbackStreamFormat.mSampleRate, _loopbackStreamFormat.mChannelsPerFrame);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "InitLoopbackDevice mBytesPerPacket = %u, mFramesPerPacket = %u",
                 _loopbackStreamFormat.mBytesPerPacket,
                 _loopbackStreamFormat.mFramesPerPacket);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "InitLoopbackDevice mBytesPerFrame = %u, mBitsPerChannel = %u",
                 _loopbackStreamFormat.mBytesPerFrame,
                 _loopbackStreamFormat.mBitsPerChannel);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "InitLoopbackDevice mFormatFlags = %u, mChannelsPerFrame = %u",
                 _loopbackStreamFormat.mFormatFlags,
                 _loopbackStreamFormat.mChannelsPerFrame);
    logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
             (const char *) &_loopbackStreamFormat.mFormatID);
    // Our preferred format to work with
    if (_loopbackStreamFormat.mChannelsPerFrame >= 2 && (_loopbackRecChannels == 2))
    {
        _loopbackDesiredFormat.mChannelsPerFrame = 2;
    } else
    {
        // Disable stereo recording when we only have one channel on the device.
        _loopbackDesiredFormat.mChannelsPerFrame = 1;
        _loopbackRecChannels = 1;
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                     "Stereo recording unavailable on this device");
    }
    if (_ptrAudioBuffer)
    {
        // Update audio buffer with the selected parameters
        _ptrAudioBuffer->SetLoopbackRecSampleRate(N_REC_SAMPLES_PER_SEC);
        _ptrAudioBuffer->SetLoopbackRecChannels((WebRtc_UWord8) _loopbackRecChannels);
        _ptrAudioBuffer->SetLoopbackRenderSampleRate(N_PLAY_SAMPLES_PER_SEC);
        _ptrAudioBuffer->SetLoopbackRenderChannels((WebRtc_UWord8)_playChannels);
    }
    // Target format: 16-bit signed, packed, linear PCM at the engine rate.
    _loopbackDesiredFormat.mSampleRate = N_REC_SAMPLES_PER_SEC;
    _loopbackDesiredFormat.mBytesPerPacket = _loopbackDesiredFormat.mChannelsPerFrame
        * sizeof(SInt16);
    _loopbackDesiredFormat.mFramesPerPacket = 1;
    _loopbackDesiredFormat.mBytesPerFrame = _loopbackDesiredFormat.mChannelsPerFrame
        * sizeof(SInt16);
    _loopbackDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;
    _loopbackDesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
        | kLinearPCMFormatFlagIsPacked;
#ifdef WEBRTC_BIG_ENDIAN
    _loopbackDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
#endif
    _loopbackDesiredFormat.mFormatID = kAudioFormatLinearPCM;
    WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_loopbackStreamFormat, &_loopbackDesiredFormat,
                                              &_loopbackCaptureConverter));
    // First try to set buffer size to desired value (10 ms * N_BLOCKS_IO)
    // TODO(xians): investigate this block.
    UInt32 bufByteCount = (UInt32)((_loopbackStreamFormat.mSampleRate / 1000.0)
        * 10.0 * N_BLOCKS_IO * _loopbackStreamFormat.mChannelsPerFrame
        * sizeof(Float32));
    if (_loopbackStreamFormat.mFramesPerPacket != 0)
    {
        // Round up to a whole number of packets.
        if (bufByteCount % _loopbackStreamFormat.mFramesPerPacket != 0)
        {
            bufByteCount = ((UInt32)(bufByteCount
                / _loopbackStreamFormat.mFramesPerPacket) + 1)
                * _loopbackStreamFormat.mFramesPerPacket;
        }
    }
    // Ensure the buffer size is within the acceptable range provided by the device.
    propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
    AudioValueRange range;
    size = sizeof(range);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_zoomDeviceMicID,
        &propertyAddress, 0, NULL, &size, &range));
    if (range.mMinimum > bufByteCount)
    {
        bufByteCount = range.mMinimum;
    } else if (range.mMaximum < bufByteCount)
    {
        bufByteCount = range.mMaximum;
    }
    propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
    size = sizeof(bufByteCount);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_zoomDeviceMicID,
        &propertyAddress, 0, NULL, size, &bufByteCount));
    // Listen for format changes
    // TODO(xians): should we be using kAudioDevicePropertyDeviceHasChanged?
    propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
    WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_zoomDeviceMicID,
        &propertyAddress, &objectListenerProc, this));
    // Listen for processor overloads
    propertyAddress.mSelector = kAudioDeviceProcessorOverload;
    WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_zoomDeviceMicID,
        &propertyAddress, &objectListenerProc, this));
    // Install the loopback IOProc; use the pre-10.5 API when the newer
    // symbol is not available at runtime (weak-linked).
    if (AudioDeviceCreateIOProcID != NULL)
    {
        WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(_zoomDeviceMicID,
            loopbackDeviceIOProc, this, &_loopbackDeviceIOProcID));
    }
    else
    {
        WEBRTC_CA_RETURN_ON_ERR(AudioDeviceAddIOProc(_zoomDeviceMicID, loopbackDeviceIOProc,this));
    }
    _loopbackRecIsInitialized = true;
    return 0;
}
// Starts loopback ("audio share") recording through the virtual Zoom device:
// allocates the Float32 capture ring buffer, creates the wake-up semaphore
// and capture worker thread, starts the device IOProc installed by
// InitLoopbackRecording(), and adjusts mute/volume so shared audio is
// audible locally. On failure after the output redirection, the original
// system default output device is restored.
//
// @return 0 on success, -1 on failure.
WebRtc_Word32 AudioDeviceMac::StartLoopbackRecording()
{
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "%s", __FUNCTION__);
#ifdef BUILD_FOR_MIMO
    // Externally-fed source uses the simpler BM path.
    if (_LoopBackDeviceSource == kExtraSource)
    {
        return StartLoopbackRecordingBM();
    }
#endif
    CriticalSectionScoped lock(_loopbackCritSect);
    if (!_loopbackRecIsInitialized)
    {
        return -1;
    }
    if (_loopbackRecording)
    {
        // Already running.
        return 0;
    }
    if (!_initialized)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " loopback Recording worker thread has not been started");
        return -1;
    }
    if (_loopbackCaptureBufData == NULL)
    {
        // PaUtil ring buffers require a power-of-two element count.
        UInt32 powerOfTwo = 1;
        while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES)
        {
            powerOfTwo <<= 1;
        }
        _loopbackCaptureBufSizeSamples = powerOfTwo;
        _loopbackCaptureBufData = new Float32[_loopbackCaptureBufSizeSamples];
    }
    if (_paLoopbackCaptureBuffer == NULL)
    {
        _paLoopbackCaptureBuffer = new PaUtilRingBuffer;
        ring_buffer_size_t bufSize = -1;
        bufSize = PaUtil_InitializeRingBuffer(_paLoopbackCaptureBuffer,
                                              sizeof(Float32),
                                              _loopbackCaptureBufSizeSamples,
                                              _loopbackCaptureBufData);
        if (bufSize == -1)
        {
            WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
                         _id, "StartLoopbackRecording PaUtil_InitializeRingBuffer() error");
            // Restore the original system default output before bailing out.
            if (!SetSystemDefaultPlayDevice(_SystemDefaultSpeakerID))
            {
                CheckAndReplaceZoomDevice();
            }
            _SystemDefaultSpeakerID = kAudioObjectUnknown;
            return -1;
        }
    }
    // Start from an empty buffer with reset bookkeeping.
    PaUtil_FlushRingBuffer(_paLoopbackCaptureBuffer);
    _loopbackCaptureBufDataReadIndex = 0;
    _loopbackCaptureBufDataWriteIndex = 0;
    _loopbackCaptureAvailbaleBufData = 0;
    if (_loopbackCaptureWorkerThread == NULL)
    {
        _loopbackCaptureWorkerThread
            = ThreadWrapper::CreateThread(RunLoopbackCapture, this, kRealtimePriority,
                                          "LoopbackCaptureWorkerThread");
        if (_loopbackCaptureWorkerThread == NULL)
        {
            WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
                         _id, " loopback Capture CreateThread() error");
            // Restore the original system default output before bailing out.
            if (!SetSystemDefaultPlayDevice(_SystemDefaultSpeakerID))
            {
                CheckAndReplaceZoomDevice();
            }
            _SystemDefaultSpeakerID = kAudioObjectUnknown;
            return -1;
        }
    }
    // Semaphore used by the IOProc to wake the worker thread.
    kern_return_t kernErr = KERN_SUCCESS;
    kernErr = semaphore_create(mach_task_self(), &_loopbackCaptureSemaphore,
                               SYNC_POLICY_FIFO, 0);
    if (kernErr != KERN_SUCCESS)
    {
        WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                     " loopback semaphore_create() error: %d", kernErr);
        // Restore the original system default output before bailing out.
        if (!SetSystemDefaultPlayDevice(_SystemDefaultSpeakerID))
        {
            CheckAndReplaceZoomDevice();
        }
        _SystemDefaultSpeakerID = kAudioObjectUnknown;
        return -1;
    }
    OSStatus err = noErr;
    unsigned int threadID(0);
    if (_loopbackCaptureWorkerThread != NULL)
    {
        _loopbackCaptureWorkerThread->Start(threadID);
    }
    _loopbackCaptureWorkerThreadId = threadID;
    // Start the device; choose the API matching how the IOProc was installed
    // in InitLoopbackRecording() (weak-linked 10.5+ symbol vs. legacy API).
    if (AudioDeviceCreateIOProcID != NULL)
    {
        WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_zoomDeviceMicID,_loopbackDeviceIOProcID));
    }
    else
    {
        WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_zoomDeviceMicID, loopbackDeviceIOProc));
    }
    _loopbackRecording = true;
    // Inform the audio buffer that audio share is now active.
    if (_ptrAudioBuffer)
    {
        if (_ptrAudioBuffer->SetAudioShareStatus(true) == 0)
        {
            _bAudioShareStatus = true;
        }
    }
    // Make sure shared audio is audible locally when local playout is on.
    if (_playing && _loopbackLocalSpeakerPlay)
    {
        bool curSpeakerMuteStatus = false;
        SpeakerMute(curSpeakerMuteStatus);
        if (curSpeakerMuteStatus)
        {
            SetSpeakerMute(false);
        }
#ifndef BUILD_FOR_MIMO
        WebRtc_UWord32 curSpeakerVolume = 0;
        SpeakerVolume(curSpeakerVolume);
        if (curSpeakerVolume < 100)
        {
            SetSpeakerVolume(100);
        }
#endif
    }
    // Carry the remembered default-speaker mute state over to the virtual device.
    _mixerManager.SetSpeakerMute(_bDefaultSpeakerIsMuted,_zoomDeviceSpeakerID,2);
    /*
    char name[128];
    sprintf(name, "/Users/matt/Library/Logs/zoom.us/audio/loopbackrecord%p.pcm",this);
    _fPCMFile = fopen(name, "wb");
    sprintf(name, "/Users/matt/Library/Logs/zoom.us/audio/loopbackrecordorg%p.pcm",this);
    _fPCMFileOrg = fopen(name, "wb");
    */
    return 0;
}
// Stops loopback ("audio share") recording: signals the IOProc to stop the
// device (with a fallback forced stop on timeout), joins the capture worker
// thread, releases the converter, buffers and semaphore, removes the
// property listeners, and restores the original system default output
// device and its mute state.
//
// @return 0 (also when recording was never initialized).
WebRtc_Word32 AudioDeviceMac::StopLoopbackRecording()
{
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "%s", __FUNCTION__);
#ifdef BUILD_FOR_MIMO
    // Externally-fed source uses the simpler BM path.
    if (_LoopBackDeviceSource == kExtraSource)
    {
        return StopLoopbackRecordingBM();
    }
#endif
    CriticalSectionScoped lock(_loopbackCritSect);
    if (!_loopbackRecIsInitialized)
    {
        return 0;
    }
    OSStatus err = noErr;
    // Stop device
    int32_t captureDeviceIsAlive = AtomicGet32(&_loopbackCaptureDeviceIsAlive);
    {
        if (_loopbackRecording && captureDeviceIsAlive == 1)
        {
            _loopbackRecording = false;
            _doStopLoopbackRec = true; // Signal to io proc to stop audio device
            _loopbackCritSect.Leave(); // Cannot be under lock, risk of deadlock
            // Wait for the IOProc to acknowledge the stop request; force a
            // stop/teardown ourselves if it never does (e.g. device removed).
            if (kEventSignaled != _stopEventLoopbackRec.Wait(WAIT_THREAD_TERMINAL))
            {
                CriticalSectionScoped critScoped(_loopbackCritSect);
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                             " Timed out stopping the loopback capture IOProc. "
                             "We may have failed to detect a device removal.");
                // Match the API used at install time (weak-linked 10.5+
                // symbols vs. legacy AudioDeviceAddIOProc API).
                if (AudioDeviceCreateIOProcID != NULL)
                {
                    WEBRTC_CA_LOG_ERR(AudioDeviceStop(_zoomDeviceMicID, _loopbackDeviceIOProcID));
                }
                else
                {
                    WEBRTC_CA_LOG_ERR(AudioDeviceStop(_zoomDeviceMicID,loopbackDeviceIOProc));
                }
                if (AudioDeviceDestroyIOProcID != NULL)
                {
                    WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_zoomDeviceMicID,
                                                                  _loopbackDeviceIOProcID));
                }
                else
                {
                    WEBRTC_CA_LOG_WARN(AudioDeviceRemoveIOProc(_zoomDeviceMicID, loopbackDeviceIOProc));
                }
            }
            _loopbackCritSect.Enter();
            _doStopLoopbackRec = false;
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " loopback Recording stopped");
        }
    }
    // Setting this signal will allow the worker thread to be stopped.
    AtomicSet32(&_loopbackCaptureDeviceIsAlive, 0);
    // Drop the lock while joining the worker thread (it takes the same
    // lock); re-acquired below.
    _loopbackCritSect.Leave();
    if (_loopbackCaptureWorkerThread != NULL)
    {
        if (!_loopbackCaptureWorkerThread->Stop())
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " Timed out waiting for the loopback caputre worker thread to "
                         "stop.");
        }
    }
    _loopbackCritSect.Enter();
    // Release the sample-rate/format converter and capture buffers.
    WEBRTC_CA_LOG_WARN(AudioConverterDispose(_loopbackCaptureConverter));
    if (_loopbackCaptureBufData)
    {
        delete[] _loopbackCaptureBufData;
        _loopbackCaptureBufData = NULL;
    }
    if (_paLoopbackCaptureBuffer)
    {
        delete _paLoopbackCaptureBuffer;
        _paLoopbackCaptureBuffer = NULL;
    }
    kern_return_t kernErr = KERN_SUCCESS;
    kernErr = semaphore_destroy(mach_task_self(), _loopbackCaptureSemaphore);
    if (kernErr != KERN_SUCCESS)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " StopLoopbackRecording semaphore_destroy() error: %d", kernErr);
    }
    if (_loopbackCaptureWorkerThread)
    {
        delete _loopbackCaptureWorkerThread;
        _loopbackCaptureWorkerThread = NULL;
    }
    // Remove listeners.
    AudioObjectPropertyAddress
        propertyAddress = { kAudioDevicePropertyStreamFormat,
            kAudioDevicePropertyScopeInput, 0 };
    WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_zoomDeviceMicID,
        &propertyAddress, &objectListenerProc, this));
    propertyAddress.mSelector = kAudioDeviceProcessorOverload;
    WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_zoomDeviceMicID,
        &propertyAddress, &objectListenerProc, this));
    _loopbackRecIsInitialized = false;
    _loopbackRecording = false;
    // Inform the audio buffer that audio share has ended.
    if (_ptrAudioBuffer)
    {
        if (_ptrAudioBuffer->SetAudioShareStatus(false) == 0)
        {
            _bAudioShareStatus = false;
        }
    }
    // Read the virtual device's mute state, restore the original system
    // default output device, and carry the mute state back to it.
    bool bZoomAudioDeviceMuted = false;
    _mixerManager.SpeakerMute(bZoomAudioDeviceMuted, _zoomDeviceSpeakerID);
    if (!SetSystemDefaultPlayDevice(_SystemDefaultSpeakerID))
    {
        CheckAndReplaceZoomDevice();
    }
    else
    {
        setZoomAudioDeviceProperty(false);
    }
    if (GetSystemDefaultPlayDevice(_SystemDefaultSpeakerID))
    {
        _mixerManager.SetSpeakerMute(bZoomAudioDeviceMuted,_SystemDefaultSpeakerID,0);
    }
    _SystemDefaultSpeakerID = kAudioObjectUnknown;
    /*
    fclose(_fPCMFile);
    fclose(_fPCMFileOrg);
    */
    return 0;
}
// Returns true while loopback ("audio share") recording is active.
bool AudioDeviceMac::LoopbackRecording() const
{
    return _loopbackRecording;
}
  6088. bool AudioDeviceMac::ZoomAudioDeviceCheck()
  6089. {
  6090. if (!checkZoomAudioDeviceVersion())
  6091. {
  6092. return false;
  6093. }
  6094. AudioDeviceID playDevices[MaxNumberDevices];
  6095. WebRtc_UWord8 ZoomAudioDeviceNum = 0;
  6096. WebRtc_UWord32 nPlayDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput,
  6097. playDevices, MaxNumberDevices,ZoomAudioDeviceNum);
  6098. bool bZoomDeviceInstalled = false;
  6099. WebRtc_Word32 zoomPlayDeviceIndex = GetZoomDeviceIndex(kAudioDevicePropertyScopeOutput,playDevices,nPlayDevices);
  6100. if (zoomPlayDeviceIndex != nPlayDevices) {
  6101. _zoomDeviceSpeakerIndex = zoomPlayDeviceIndex;
  6102. _zoomDeviceSpeakerID = playDevices[zoomPlayDeviceIndex];
  6103. bZoomDeviceInstalled = true;
  6104. }
  6105. if (bZoomDeviceInstalled)
  6106. {
  6107. AudioDeviceID recordDevices[MaxNumberDevices];
  6108. WebRtc_UWord8 ZoomAudioDeviceNum = 0;
  6109. WebRtc_UWord32 nRecordDevices = GetNumberDevices(kAudioDevicePropertyScopeInput,
  6110. recordDevices, MaxNumberDevices,ZoomAudioDeviceNum);
  6111. WebRtc_Word32 zoomRecordDeviceIndex = GetZoomDeviceIndex(kAudioDevicePropertyScopeInput,recordDevices,nRecordDevices);
  6112. if (zoomRecordDeviceIndex!= nRecordDevices)
  6113. {
  6114. _zoomDeviceMicIndex = zoomRecordDeviceIndex;
  6115. _zoomDeviceMicID = recordDevices[zoomRecordDeviceIndex];
  6116. }
  6117. else
  6118. {
  6119. bZoomDeviceInstalled = false;
  6120. }
  6121. }
  6122. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  6123. " SoundFlowerDeviceCheck bZoomDeviceInstalled = %d", bZoomDeviceInstalled);
  6124. return bZoomDeviceInstalled;
  6125. }
  6126. bool AudioDeviceMac::SetSystemDefaultPlayDevice(AudioDeviceID device)
  6127. {
  6128. bool bSuccess = false;
  6129. UInt32 thePropSize;
  6130. AudioDeviceID *theDeviceList = NULL;
  6131. UInt32 theNumDevices = 0;
  6132. AudioObjectPropertyAddress thePropertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  6133. OSStatus result = AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &thePropertyAddress, 0, NULL, &thePropSize);
  6134. if (result)
  6135. {
  6136. return bSuccess;
  6137. }
  6138. // Find out how many devices are on the system
  6139. theNumDevices = thePropSize / sizeof(AudioDeviceID);
  6140. theDeviceList = (AudioDeviceID*)calloc(theNumDevices, sizeof(AudioDeviceID));
  6141. result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &thePropertyAddress, 0, NULL, &thePropSize, theDeviceList);
  6142. for (UInt32 i=0; i < theNumDevices; i++)
  6143. {
  6144. if (device == theDeviceList[i])
  6145. {
  6146. // we found the device, now it as the default output device
  6147. thePropertyAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
  6148. thePropertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
  6149. thePropertyAddress.mElement = kAudioObjectPropertyElementMaster;
  6150. result = AudioObjectSetPropertyData(kAudioObjectSystemObject, &thePropertyAddress, 0, NULL, sizeof(AudioDeviceID), &theDeviceList[i]);
  6151. if (result)
  6152. {
  6153. bSuccess = false;
  6154. }
  6155. else
  6156. {
  6157. bSuccess = true;
  6158. }
  6159. break;
  6160. }
  6161. }
  6162. if (theDeviceList)
  6163. {
  6164. free(theDeviceList);
  6165. }
  6166. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  6167. " SetSystemDefaultPlayDevice %d,%d", device,bSuccess);
  6168. return bSuccess;
  6169. }
  6170. bool AudioDeviceMac::GetSystemDefaultPlayDevice(AudioDeviceID& device)
  6171. {
  6172. AudioObjectPropertyAddress
  6173. propertyAddressDefault = { kAudioHardwarePropertyDefaultOutputDevice,
  6174. kAudioObjectPropertyScopeGlobal,
  6175. kAudioObjectPropertyElementMaster };
  6176. AudioDeviceID usedID;
  6177. UInt32 uintSize = sizeof(UInt32);
  6178. AudioObjectGetPropertyData(kAudioObjectSystemObject,&propertyAddressDefault, 0, NULL, &uintSize, &usedID);
  6179. if (usedID != kAudioDeviceUnknown)
  6180. {
  6181. device = usedID;
  6182. return true;
  6183. } else
  6184. {
  6185. WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
  6186. "GetSystemDefaultPlayDevice(): Default device unknown");
  6187. return false;
  6188. }
  6189. }
  6190. OSStatus AudioDeviceMac::loopbackDeviceIOProc(AudioDeviceID device, const AudioTimeStamp*,
  6191. const AudioBufferList* inputData,
  6192. const AudioTimeStamp* inputTime,
  6193. AudioBufferList*,
  6194. const AudioTimeStamp*, void* clientData)
  6195. {
  6196. AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
  6197. assert(ptrThis != NULL);
  6198. ptrThis->implLoopbackInDeviceIOProc(device,inputData, inputTime);
  6199. return 0;
  6200. }
  6201. OSStatus AudioDeviceMac::implLoopbackInDeviceIOProc(AudioDeviceID device,const AudioBufferList *inputData,
  6202. const AudioTimeStamp *inputTime)
  6203. {
  6204. OSStatus err = noErr;
  6205. UInt64 inputTimeNs = AudioConvertHostTimeToNanos(inputTime->mHostTime);
  6206. UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
  6207. // Check if we should close down audio device
  6208. // Double-checked locking optimization to remove locking overhead
  6209. if (_doStopLoopbackRec)
  6210. {
  6211. _loopbackCritSect.Enter();
  6212. if (_doStopLoopbackRec)
  6213. {
  6214. if (!_loopbackRecording)
  6215. {
  6216. if (AudioDeviceCreateIOProcID != NULL)
  6217. {
  6218. WEBRTC_CA_LOG_ERR(AudioDeviceStop(_zoomDeviceMicID,_loopbackDeviceIOProcID));
  6219. }
  6220. else
  6221. {
  6222. WEBRTC_CA_LOG_ERR(AudioDeviceStop(_zoomDeviceMicID,loopbackDeviceIOProc));
  6223. }
  6224. if (AudioDeviceDestroyIOProcID != NULL)
  6225. {
  6226. WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_zoomDeviceMicID,_loopbackDeviceIOProcID));
  6227. }
  6228. else
  6229. {
  6230. if (err == noErr)
  6231. {
  6232. WEBRTC_CA_LOG_WARN(AudioDeviceRemoveIOProc(_zoomDeviceMicID, loopbackDeviceIOProc));
  6233. }
  6234. }
  6235. if (err == noErr)
  6236. {
  6237. WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
  6238. _id, " loopback Recording device stopped");
  6239. }
  6240. }
  6241. if (err == noErr)
  6242. {
  6243. _doStopLoopbackRec = false;
  6244. _stopEventLoopbackRec.Set();
  6245. }
  6246. _loopbackCritSect.Leave();
  6247. return 0;
  6248. }
  6249. _loopbackCritSect.Leave();
  6250. }
  6251. if (!_loopbackRecording)
  6252. {
  6253. return 0;
  6254. }
  6255. if (device != _zoomDeviceMicID)
  6256. {
  6257. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  6258. " Error device id not match device = %d,_zoomDeviceMicID = %d",device,_zoomDeviceMicID);
  6259. return 0;
  6260. }
  6261. ring_buffer_size_t bufSizeSamples =
  6262. PaUtil_GetRingBufferReadAvailable(_paLoopbackCaptureBuffer);
  6263. assert(inputData->mNumberBuffers == 1);
  6264. ring_buffer_size_t numSamples = inputData->mBuffers->mDataByteSize
  6265. * _loopbackStreamFormat.mChannelsPerFrame / _loopbackStreamFormat.mBytesPerPacket;
  6266. PaUtil_WriteRingBuffer(_paLoopbackCaptureBuffer, inputData->mBuffers->mData,
  6267. numSamples);
  6268. kern_return_t kernErr = semaphore_signal_all(_loopbackCaptureSemaphore);
  6269. if (kernErr != KERN_SUCCESS)
  6270. {
  6271. WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
  6272. "implLoopbackInDeviceIOProc semaphore_signal_all() error: %d", kernErr);
  6273. }
  6274. return err;
  6275. }
  6276. OSStatus AudioDeviceMac::loopbackInConverterProc(AudioConverterRef audioConverter,
  6277. UInt32 *numberDataPackets, AudioBufferList *data,
  6278. AudioStreamPacketDescription **dataPacketDescription,
  6279. void *inUserData)
  6280. {
  6281. AudioDeviceMac *ptrThis = static_cast<AudioDeviceMac*> (inUserData);
  6282. assert(ptrThis != NULL);
  6283. return ptrThis->implLoopbackInConverterProc(numberDataPackets, data);
  6284. }
// AudioConverter input callback for the loopback path. Blocks until the ring
// buffer holds |*numberDataPackets| packets worth of samples, then hands the
// converter a pointer directly into the ring buffer (zero copy). Returns 0 on
// success, or 1 (a private sentinel) to make the converter abort when the
// capture device has died.
OSStatus AudioDeviceMac::implLoopbackInConverterProc(UInt32 *numberDataPackets,
AudioBufferList *data)
{
assert(data->mNumberBuffers == 1);
// Interleaved samples requested = packets * channels.
ring_buffer_size_t numSamples = *numberDataPackets
* _loopbackStreamFormat.mChannelsPerFrame;
// Wait until the IOProc has produced enough data.
while (PaUtil_GetRingBufferReadAvailable(_paLoopbackCaptureBuffer) < numSamples)
{
mach_timespec_t timeout;
timeout.tv_sec = 0;
// NOTE(review): tv_nsec is in *nanoseconds*, but TIMER_PERIOD_MS reads like a
// millisecond constant; upstream WebRTC uses TIMER_PERIOD_MS * 1000 * 1000
// here. As written the wait is far shorter than intended -- confirm.
timeout.tv_nsec = TIMER_PERIOD_MS;
kern_return_t kernErr = semaphore_timedwait(_loopbackCaptureSemaphore, timeout);
if (kernErr == KERN_OPERATION_TIMED_OUT)
{
// On timeout, check whether the capture device is still alive; a dead
// device means no more signals will ever arrive.
int32_t signal = AtomicGet32(&_loopbackCaptureDeviceIsAlive);
if (signal == 0)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
" implLoopbackInConverterProc waiting for loopback record timeout error: %d, exit thread!!!!", kernErr);
// The capture device is no longer alive; stop the worker thread.
*numberDataPackets = 0;
return 1;
}
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
" implLoopbackInConverterProc waiting for loopback record timeout error: %d", kernErr);
} else if (kernErr != KERN_SUCCESS)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
" loopback semaphore_wait() error: %d", kernErr);
}
}
// Point the converter at the first contiguous region of the ring buffer.
// NOTE(review): the second (wrap-around) region is deliberately discarded via
// the dummy out-params; |numSamples| is clamped to the first region's size,
// so the advance below stays consistent. Mirrors upstream WebRTC behavior.
void* dummyPtr;
ring_buffer_size_t dummySize;
PaUtil_GetRingBufferReadRegions(_paLoopbackCaptureBuffer, numSamples,
&data->mBuffers->mData, &numSamples,
&dummyPtr, &dummySize);
PaUtil_AdvanceRingBufferReadIndex(_paLoopbackCaptureBuffer, numSamples);
data->mBuffers->mNumberChannels = _loopbackStreamFormat.mChannelsPerFrame;
// Report back how many packets were actually delivered.
*numberDataPackets = numSamples / _loopbackStreamFormat.mChannelsPerFrame;
data->mBuffers->mDataByteSize = *numberDataPackets * _loopbackStreamFormat.mBytesPerPacket;
return 0;
}
  6327. bool AudioDeviceMac::RunLoopbackCapture(void* ptrThis)
  6328. {
  6329. return static_cast<AudioDeviceMac*> (ptrThis)->LoopbackCaptureWorkerThread();
  6330. }
  6331. #ifdef BUILD_FOR_MIMO
  6332. bool AudioDeviceMac::LoopbackCaptureWorkerThreadBM()
  6333. {
  6334. OSStatus err = noErr;
  6335. UInt32 noRecSamples = ENGINE_REC_BUF_SIZE_IN_SAMPLES;
  6336. SInt16 recordBuffer[noRecSamples * _loopbackRecChannels * 2];
  6337. ring_buffer_size_t numSamples = noRecSamples * _loopbackRecChannels;
  6338. while (PaUtil_GetRingBufferReadAvailable(_paLoopbackCaptureBuffer) < numSamples)
  6339. {
  6340. mach_timespec_t timeout;
  6341. timeout.tv_sec = 0;
  6342. timeout.tv_nsec = TIMER_PERIOD_MS;
  6343. kern_return_t kernErr = semaphore_timedwait(_loopbackCaptureSemaphore, timeout);
  6344. if (kernErr == KERN_OPERATION_TIMED_OUT)
  6345. {
  6346. WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " loopback record timeout error: %d", kernErr);
  6347. return true;
  6348. } else if (kernErr != KERN_SUCCESS)
  6349. {
  6350. WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " loopback semaphore_wait() error: %d", kernErr);
  6351. }
  6352. }
  6353. void* dummyPtr = NULL;
  6354. ring_buffer_size_t dummySize = 0;
  6355. void* dummyPtr1 = NULL;
  6356. ring_buffer_size_t dummySize1 = 0;
  6357. PaUtil_GetRingBufferReadRegions(_paLoopbackCaptureBuffer, numSamples,
  6358. &dummyPtr, &dummySize, &dummyPtr1, &dummySize1);
  6359. PaUtil_AdvanceRingBufferReadIndex(_paLoopbackCaptureBuffer, numSamples);
  6360. if (dummySize != 0)
  6361. {
  6362. memcpy(recordBuffer,(int16_t *)dummyPtr,dummySize*sizeof(int16_t));
  6363. }
  6364. if (dummySize1 != 0)
  6365. {
  6366. memcpy(recordBuffer + dummySize, (int16_t *)dummyPtr1, dummySize1*sizeof(int16_t));
  6367. }
  6368. uint16_t size = noRecSamples;
  6369. {
  6370. if (!_ptrAudioBuffer)
  6371. {
  6372. WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
  6373. " loobback capture AudioBuffer is invalid");
  6374. return false;
  6375. }
  6376. _ptrAudioBuffer->SetLoopbackRecordedBuffer((WebRtc_Word8*) &recordBuffer, (WebRtc_UWord32) size);
  6377. {
  6378. CriticalSectionScoped lock(_zoomDeviceBufferCritSect);
  6379. if (_loopbackCaptureAvailbaleBufData != MAXLOOPBACKFRAMEBUFNUM)
  6380. {
  6381. memcpy(_ploopbackCaptureBufData[_loopbackCaptureBufDataWriteIndex],recordBuffer,size * 4 );
  6382. _loopbackCaptureBufDataWriteIndex ++;
  6383. if (_loopbackCaptureBufDataWriteIndex == MAXLOOPBACKFRAMEBUFNUM) {
  6384. _loopbackCaptureBufDataWriteIndex = 0;
  6385. }
  6386. _loopbackCaptureAvailbaleBufData++;
  6387. }
  6388. }
  6389. bool bHasEcho = false;
  6390. _ptrAudioBuffer->DeliverLoopbackRecordedData(bHasEcho);
  6391. }
  6392. return true;
  6393. }
  6394. #endif
// Worker-loop body for loopback capture: pulls converted samples from the
// AudioConverter (which in turn blocks on the ring buffer via
// implLoopbackInConverterProc), stores a copy for local playback and delivers
// the frame downstream. Returns true to keep the thread running, false to
// stop it.
bool AudioDeviceMac::LoopbackCaptureWorkerThread()
{
#ifdef BUILD_FOR_MIMO
if (_LoopBackDeviceSource == kExtraSource)
{
return LoopbackCaptureWorkerThreadBM();
}
#endif
OSStatus err = noErr;
// Samples per engine frame, across all channels.
UInt32 noRecSamples = ENGINE_REC_BUF_SIZE_IN_SAMPLES
* _loopbackDesiredFormat.mChannelsPerFrame;
SInt16 recordBuffer[noRecSamples*4];
UInt32 size = ENGINE_REC_BUF_SIZE_IN_SAMPLES;
AudioBufferList engineBuffer;
engineBuffer.mNumberBuffers = 1; // Interleaved channels.
engineBuffer.mBuffers->mNumberChannels = _loopbackDesiredFormat.mChannelsPerFrame;
engineBuffer.mBuffers->mDataByteSize = _loopbackDesiredFormat.mBytesPerPacket
* noRecSamples;
engineBuffer.mBuffers->mData = recordBuffer;
err = AudioConverterFillComplexBuffer(_loopbackCaptureConverter, loopbackInConverterProc,
this, &size, &engineBuffer, NULL);
// A short read means the converter got out of sync; recreate it.
// NOTE(review): WEBRTC_CA_RETURN_ON_ERR expands to "return -1", which in this
// bool function converts to true (keep running). Presumably intentional
// best-effort recovery, but confirm -- a failed AudioConverterNew leaves
// _loopbackCaptureConverter invalid for the next iteration.
if (size != ENGINE_REC_BUF_SIZE_IN_SAMPLES) {
WEBRTC_CA_LOG_WARN(AudioConverterDispose(_loopbackCaptureConverter));
WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_loopbackStreamFormat, &_loopbackDesiredFormat,
&_loopbackCaptureConverter));
}
if (err != noErr)
{
if (err == 1)
{
// This is our own error.
return false;
} else
{
logCAMsg(kTraceError, kTraceAudioDevice, _id,
"Error in loopback AudioConverterFillComplexBuffer()",
(const char *) &err);
return false;
}
}
// TODO(xians): what if the returned size is incorrect?
if (size == ENGINE_REC_BUF_SIZE_IN_SAMPLES)
{
if (!_ptrAudioBuffer)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
" loobback capture AudioBuffer is invalid");
return false;
}
_ptrAudioBuffer->SetLoopbackRecordedBuffer((WebRtc_Word8*) &recordBuffer,
(WebRtc_UWord32) size);
{
/*
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
"LoopbackCaptureWorkerThread _loopbackCaptureBufDataWriteIndex(%d),size(%d)", _loopbackCaptureBufDataWriteIndex,size*2);
*/
// Keep a copy for local speaker playback; drop the frame when the FIFO
// is full. size * 4 bytes == |size| stereo int16 frames.
CriticalSectionScoped lock(_zoomDeviceBufferCritSect);
if (_loopbackCaptureAvailbaleBufData != MAXLOOPBACKFRAMEBUFNUM)
{
memcpy(_ploopbackCaptureBufData[_loopbackCaptureBufDataWriteIndex],recordBuffer,size * 4 );
// fwrite(_ploopbackCaptureBufData[_loopbackCaptureBufDataWriteIndex], 4, size, _fPCMFileOrg);
_loopbackCaptureBufDataWriteIndex ++;
if (_loopbackCaptureBufDataWriteIndex == MAXLOOPBACKFRAMEBUFNUM) {
_loopbackCaptureBufDataWriteIndex = 0;
}
_loopbackCaptureAvailbaleBufData++;
}
}
bool bHasEcho = false;
_ptrAudioBuffer->DeliverLoopbackRecordedData(bHasEcho);
}
return true;
}
  6468. WebRtc_Word32 AudioDeviceMac::CheckAndRemoveZoomDevice(const AudioObjectPropertyScope scope,
  6469. AudioDeviceID scopedDeviceIds[],
  6470. const WebRtc_UWord32 deviceListLength)
  6471. {
  6472. AudioDeviceID deviceIds[MaxNumberDevices];
  6473. memcpy(deviceIds,scopedDeviceIds,deviceListLength*sizeof(AudioDeviceID));
  6474. WebRtc_Word32 deviceNum = deviceListLength;
  6475. WebRtc_Word32 i = 0,j = 0,real_index = 0;
  6476. for(i = 0; i < deviceListLength; i++)
  6477. {
  6478. char devName[128];
  6479. memset(devName, 0, sizeof(devName));
  6480. if(-1 == GetDeviceFriendName(scope,deviceIds[i],devName))
  6481. {
  6482. continue;
  6483. }
  6484. #ifdef BUILD_FOR_MIMO
  6485. if ((strstr(devName, ZoomAudioDeviceName2) != 0) || (strstr(devName,BlackmagicAudioName) != 0)/* || (strstr(devName,MagewellAudioName) != 0)*/
  6486. || (strncmp(devName,ZoomAudioDeviceName,strlen(ZoomAudioDeviceName)) == 0))
  6487. #else
  6488. if ((strstr(devName, ZoomAudioDeviceName2) != 0) || (strncmp(devName,ZoomAudioDeviceName,strlen(ZoomAudioDeviceName)) == 0))
  6489. #endif
  6490. {
  6491. for (j = real_index; j < (deviceNum - 1); j++)
  6492. {
  6493. scopedDeviceIds[j] = scopedDeviceIds[j+1];
  6494. }
  6495. deviceNum--;
  6496. continue;
  6497. }
  6498. real_index++;
  6499. }
  6500. return deviceNum;
  6501. }
  6502. WebRtc_Word32 AudioDeviceMac::GetZoomDeviceIndex(const AudioObjectPropertyScope scope,
  6503. AudioDeviceID scopedDeviceIds[],
  6504. const WebRtc_UWord32 deviceListLength)
  6505. {
  6506. WebRtc_Word32 i = 0;
  6507. char devName[128];
  6508. for (i = 0; i < deviceListLength; i++)
  6509. {
  6510. memset(devName, 0, sizeof(devName));
  6511. GetDeviceFriendName(scope,scopedDeviceIds[i],devName);
  6512. if (strncmp(devName, ZoomAudioDeviceName,strlen(ZoomAudioDeviceName)) == 0)
  6513. {
  6514. break;
  6515. }
  6516. }
  6517. return i;
  6518. }
// Maps |index|, an index into the Zoom-filtered device list (as produced by
// CheckAndRemoveZoomDevice), back to the corresponding index in the full HAL
// device list by bumping it past every Zoom/virtual entry that precedes it.
// |index| is adjusted in place and also returned.
// NOTE(review): |deviceListLength| is unused here -- presumably a leftover;
// the full list is re-enumerated instead. Confirm.
WebRtc_Word32 AudioDeviceMac::CheckAndIncreaseZoomDevice(const AudioObjectPropertyScope scope,
const WebRtc_UWord32 deviceListLength,WebRtc_UWord16 &index)
{
AudioDeviceID deviceIds[MaxNumberDevices];
WebRtc_UWord8 ZoomAudioDeviceNum = 0;
int numberDevices = GetNumberDevices(scope, deviceIds,MaxNumberDevices,ZoomAudioDeviceNum);
WebRtc_Word32 i = 0;
char devName[128];
for (i = 0; i < numberDevices; i++)
{
memset(devName, 0, sizeof(devName));
GetDeviceFriendName(scope,deviceIds[i],devName);
#ifdef BUILD_FOR_MIMO
if ((strstr(devName, ZoomAudioDeviceName2) != 0) || (strstr(devName,BlackmagicAudioName) != 0)/* || (strstr(devName,MagewellAudioName) != 0)*/
|| (strncmp(devName,ZoomAudioDeviceName,strlen(ZoomAudioDeviceName)) == 0))
#else
if (strstr(devName, ZoomAudioDeviceName2) != 0|| (strncmp(devName,ZoomAudioDeviceName,strlen(ZoomAudioDeviceName)) == 0))
#endif
{
// A filtered-out device sits at or before the target position, so the
// target shifts one slot to the right in the full list.
if (index >= i)
{
index++;
}
}
}
return index;
}
// If the system default output device is still the Zoom virtual speaker,
// re-points it at a real device: first preference is any "Built-in" output,
// falling back to the first non-Zoom output. Finally clears the Zoom device
// property. Always returns true.
bool AudioDeviceMac::CheckAndReplaceZoomDevice()
{
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
" CheckAndReplaceZoomDevice");
AudioDeviceID defaultSystemSpeaker = kAudioDeviceUnknown;
GetSystemDefaultPlayDevice(defaultSystemSpeaker);
char defaultSystemSpeakerName[128];
memset(defaultSystemSpeakerName,0, sizeof(defaultSystemSpeakerName));
if (GetDeviceFriendName(kAudioDevicePropertyScopeOutput, defaultSystemSpeaker, defaultSystemSpeakerName))
{
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
" CheckAndReplaceZoomDevice defaultSystemSpeakerName = %s",defaultSystemSpeakerName);
// Only act when the default output is still the Zoom virtual device.
if (strncmp(defaultSystemSpeakerName, ZoomAudioDeviceName, strlen(ZoomAudioDeviceName)) == 0)
{
AudioDeviceID playDevices[MaxNumberDevices];
WebRtc_UWord8 ZoomAudioDeviceNum = 0;
WebRtc_Word16 nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput, playDevices,
MaxNumberDevices,ZoomAudioDeviceNum);
int index = 0;
char devName[128];
char devGUID[128];
// First pass: prefer a "Built-in" output device.
// NOTE(review): GetDeviceName() takes a device *index* while playDevices[]
// is indexed positionally -- this assumes both enumerations agree; confirm.
for (; index < nDevices; index++)
{
memset(devName, 0, sizeof(devName));
memset(devGUID, 0, sizeof(devGUID));
if(0 == GetDeviceName(kAudioDevicePropertyScopeOutput,index,devName,devGUID))
{
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
" CheckAndReplaceZoomDevice index = %d, devName = %s",index,devName);
if (strstr(devName, "Built-in"))
{
if(SetSystemDefaultPlayDevice(playDevices[index]))
{
break;
}
}
}
}
// Second pass (no built-in device worked): take the first non-Zoom output.
if (index == nDevices)
{
char devName[128];
char devGUID[128];
for (index = 0; index < nDevices; index++)
{
memset(devName, 0, sizeof(devName));
memset(devGUID, 0, sizeof(devGUID));
if(0 == GetDeviceName(kAudioDevicePropertyScopeOutput,index,devName,devGUID))
{
if (strncmp(devName, ZoomAudioDeviceName, strlen(ZoomAudioDeviceName)) != 0)
{
if(SetSystemDefaultPlayDevice(playDevices[index]))
{
break;
}
}
}
}
}
}
}
setZoomAudioDeviceProperty(false);
return true;
}
  6609. bool AudioDeviceMac::GetDeviceFriendName(const AudioObjectPropertyScope scope,AudioDeviceID DeviceId,char* name)
  6610. {
  6611. UInt32 len = kAdmMaxDeviceNameSize;
  6612. AudioObjectPropertyAddress propertyAddress = {kAudioDevicePropertyDeviceName, scope, 0 };
  6613. if(noErr != AudioObjectGetPropertyData(DeviceId,&propertyAddress, 0, NULL, &len, name))
  6614. {
  6615. return false;
  6616. }
  6617. propertyAddress.mSelector = kAudioDevicePropertyDataSource;
  6618. Boolean hasProperty = AudioObjectHasProperty(DeviceId,&propertyAddress);
  6619. if(hasProperty)
  6620. {
  6621. UInt32 dataSource = 0;
  6622. UInt32 size = sizeof(dataSource);
  6623. if(noErr == AudioObjectGetPropertyData(DeviceId,&propertyAddress, 0, NULL, &size, &dataSource))
  6624. {
  6625. AudioValueTranslation trans;
  6626. CFStringRef str = NULL;
  6627. Boolean ok;
  6628. trans.mInputData = &dataSource;
  6629. trans.mInputDataSize = sizeof(UInt32);
  6630. trans.mOutputData = &str;
  6631. trans.mOutputDataSize = sizeof(CFStringRef);
  6632. propertyAddress.mSelector = kAudioDevicePropertyDataSourceNameForIDCFString;
  6633. size = sizeof(AudioValueTranslation);
  6634. if(AudioObjectGetPropertyData(DeviceId,&propertyAddress,0,NULL,&size,&trans)==noErr)
  6635. {
  6636. char sourceName[128];
  6637. if(str != NULL && CFStringGetCString(str, sourceName, 128, kCFStringEncodingUTF8))
  6638. {
  6639. strcat(name, " (");
  6640. strcat(name, sourceName);
  6641. strcat(name, ")");
  6642. }
  6643. }
  6644. if(str)
  6645. CFRelease(str);
  6646. }
  6647. }
  6648. return true;
  6649. }
  6650. WebRtc_Word32 AudioDeviceMac::StopLoopbackLocalSpeakerPlay()
  6651. {
  6652. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
  6653. _loopbackLocalSpeakerPlay = false;
  6654. return 0;
  6655. }
  6656. WebRtc_Word32 AudioDeviceMac::StartLoopbackLocalSpeakerPlay()
  6657. {
  6658. WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
  6659. _loopbackLocalSpeakerPlay = true;
  6660. return 0;
  6661. }
  6662. WebRtc_Word32 AudioDeviceMac::RecordingSampleRate()
  6663. {
  6664. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s, recSampleRate= %d", __FUNCTION__, _inStreamFormat.mSampleRate);
  6665. return _inStreamFormat.mSampleRate;
  6666. }
  6667. WebRtc_Word32 AudioDeviceMac::AdjustMicrophoneSampleRateBaseDeviceMode()
  6668. {
  6669. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s, use_exclusive_mode= %d", __FUNCTION__, _bUseExclusiveMode);
  6670. if (_bUseExclusiveMode)
  6671. {
  6672. OSStatus err = noErr;
  6673. AudioObjectPropertyAddress propertyAddress ={ kAudioHardwarePropertyDefaultInputDevice,
  6674. kAudioObjectPropertyScopeGlobal,
  6675. kAudioObjectPropertyElementMaster };
  6676. AudioDeviceID deviceId = kAudioDeviceUnknown;
  6677. UInt32 size = sizeof(AudioDeviceID);
  6678. WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
  6679. &propertyAddress, 0, NULL, &size, &deviceId));
  6680. if (deviceId == kAudioDeviceUnknown)
  6681. {
  6682. WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
  6683. " AdjustMicrophoneSampleRate No default device exists");
  6684. return -1;
  6685. }
  6686. else
  6687. {
  6688. UInt32 transportType;
  6689. AudioObjectPropertyAddress propertyAddressForTP = { kAudioDevicePropertyTransportType,kAudioDevicePropertyScopeInput, 0 };
  6690. UInt32 size = sizeof(UInt32);
  6691. WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId,&propertyAddressForTP, 0, NULL, &size, &transportType));
  6692. if (transportType == 'bltn')
  6693. {
  6694. AudioObjectPropertyAddress
  6695. propertyAddressForFormat = { kAudioDevicePropertyStreamFormat,
  6696. kAudioDevicePropertyScopeInput, 0 };
  6697. memset(&_microphoneDefaultStreamFormat, 0, sizeof(_microphoneDefaultStreamFormat));
  6698. size = sizeof(_microphoneDefaultStreamFormat);
  6699. WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId,
  6700. &propertyAddressForFormat, 0, NULL, &size, &_microphoneDefaultStreamFormat));
  6701. if (_microphoneDefaultStreamFormat.mSampleRate != 48000)
  6702. {
  6703. AudioStreamBasicDescription* p;
  6704. Boolean ow;
  6705. int i;
  6706. UInt32 propertySize=0; //sizeof(p);
  6707. AudioObjectPropertyAddress propertyAddressForAllFormat = { kAudioDevicePropertyStreamFormats,
  6708. kAudioDevicePropertyScopeInput, 0 };
  6709. err = AudioObjectGetPropertyDataSize(deviceId,
  6710. &propertyAddressForAllFormat, 0, NULL, &propertySize);
  6711. if(err == noErr)
  6712. {
  6713. p = (AudioStreamBasicDescription*)malloc(propertySize);
  6714. err = AudioObjectGetPropertyData(deviceId,
  6715. &propertyAddressForAllFormat, 0, NULL, &propertySize, p);
  6716. if (err == noErr)
  6717. {
  6718. int indexFor48KFormat = -1;
  6719. for(int i=0;i<propertySize/sizeof(AudioStreamBasicDescription);i++)
  6720. {
  6721. AudioStreamBasicDescription* pp = &(p[i]);
  6722. if ((pp->mSampleRate == 48000) && (pp->mFormatID == kAudioFormatLinearPCM))
  6723. {
  6724. indexFor48KFormat = i;
  6725. break;
  6726. }
  6727. }
  6728. if (indexFor48KFormat != -1 )
  6729. {
  6730. err = AudioObjectSetPropertyData(deviceId,
  6731. &propertyAddressForFormat,
  6732. 0,
  6733. NULL,
  6734. sizeof(AudioStreamBasicDescription),
  6735. &(p[indexFor48KFormat]));
  6736. if (err == noErr)
  6737. {
  6738. _bMicrophoneDefaultStreamFormatChanged = true;
  6739. WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
  6740. "AdjustMicrophoneSampleRate set microphone format to 48K");
  6741. }
  6742. }
  6743. }
  6744. free(p);
  6745. }
  6746. }
  6747. }
  6748. }
  6749. }
  6750. else
  6751. {
  6752. if (_bMicrophoneDefaultStreamFormatChanged)
  6753. {
  6754. OSStatus err = noErr;
  6755. AudioObjectPropertyAddress propertyAddress ={ kAudioHardwarePropertyDefaultInputDevice,
  6756. kAudioObjectPropertyScopeGlobal,
  6757. kAudioObjectPropertyElementMaster };
  6758. AudioDeviceID deviceId = kAudioDeviceUnknown;
  6759. UInt32 size = sizeof(AudioDeviceID);
  6760. WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
  6761. &propertyAddress, 0, NULL, &size, &deviceId));
  6762. if (deviceId == kAudioDeviceUnknown)
  6763. {
  6764. WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
  6765. " AdjustMicrophoneSampleRate No default device exists");
  6766. return -1;
  6767. }
  6768. else
  6769. {
  6770. UInt32 transportType = 0;
  6771. AudioObjectPropertyAddress propertyAddressForTP = { kAudioDevicePropertyTransportType,kAudioDevicePropertyScopeInput, 0 };
  6772. UInt32 size = sizeof(UInt32);
  6773. WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId,&propertyAddressForTP, 0, NULL, &size, &transportType));
  6774. if (transportType == 'bltn')
  6775. {
  6776. AudioObjectPropertyAddress
  6777. propertyAddressForFormat = { kAudioDevicePropertyStreamFormat,
  6778. kAudioDevicePropertyScopeInput, 0 };
  6779. err = AudioObjectSetPropertyData(deviceId,
  6780. &propertyAddressForFormat,
  6781. 0,
  6782. NULL,
  6783. sizeof(AudioStreamBasicDescription),
  6784. &(_microphoneDefaultStreamFormat));
  6785. WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
  6786. "AdjustMicrophoneSampleRate set microphone to default format err = %d",err);
  6787. }
  6788. }
  6789. }
  6790. _bMicrophoneDefaultStreamFormatChanged = false;
  6791. }
  6792. return 0;
  6793. }
  6794. WebRtc_Word32 AudioDeviceMac::SetUSBExtenderWithAudioIssue(void* USBExtenderWithAudioIssueList, WebRtc_UWord32 size)
  6795. {
  6796. int32_t USBExtenderWithIssueNum = size / sizeof(USBExtenderWithAudioIssueInfo);
  6797. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  6798. "SetUSBExtenderWithAudioIssue size = %d,USBExtenderWithIssueNum = %d",size,USBExtenderWithIssueNum);
  6799. USBExtenderWithAudioIssueInfo* pUSBExtenderWithIssue = static_cast<USBExtenderWithAudioIssueInfo*>(USBExtenderWithAudioIssueList);
  6800. for (int i = 0; i < USBExtenderWithIssueNum; i++)
  6801. {
  6802. _mUSBExtenderWithAudioIssueVec.push_back((USBExtenderWithAudioIssueInfo){pUSBExtenderWithIssue->extenderPID,pUSBExtenderWithIssue->extenderVID});
  6803. WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
  6804. "SetUSBExtenderWithAudioIssue index = %d,USBExtenderVID = 0x%x,USBExtenderPID = 0x%x",i,pUSBExtenderWithIssue->extenderVID,pUSBExtenderWithIssue->extenderPID);
  6805. pUSBExtenderWithIssue++;
  6806. }
  6807. return 0;
  6808. }
  6809. } // namespace webrtc
添加新批注
在作者公开此批注前,只有你和作者可见。
回复批注