Bug 923992 - Call GetDefaultAudioEndpoint when needed rather than caching its result when called at cubeb init time. r=padenot

Matthew Gregan 2013-11-28 14:12:45 +13:00
parent 2ca1434c82
commit 3e21b06488
2 changed files with 92 additions and 48 deletions
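
In short, the WASAPI backend no longer keeps an IMMDevice pointer in the cubeb context; every operation that needs the default render endpoint now looks it up on the spot and releases it as soon as the required interface has been activated. Each call site in the diff below follows the same shape. A condensed C++ sketch of that per-call pattern (illustration only: the wrapper function name is made up, the WASAPI headers plus cubeb's get_default_endpoint, SafeRelease and LOG helpers are assumed to be in scope, and error handling is abbreviated):

/* Sketch of the per-call pattern this commit introduces at every call site. */
static int activate_client_on_current_default()
{
  IMMDevice * device;
  HRESULT hr = get_default_endpoint(&device);  /* query the current default */
  if (FAILED(hr)) {
    return CUBEB_ERROR;
  }
  IAudioClient * client;
  hr = device->Activate(__uuidof(IAudioClient), CLSCTX_INPROC_SERVER,
                        NULL, (void **)&client);
  SafeRelease(device);                         /* no cached device pointer */
  if (FAILED(hr)) {
    return CUBEB_ERROR;
  }
  /* ... use the client ... */
  SafeRelease(client);
  return CUBEB_OK;
}

Resolving the endpoint per call means each operation acts on whatever the default render device is at that moment, rather than on whichever device happened to be the default when wasapi_init ran.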


@@ -5,4 +5,4 @@ Makefile.in build files for the Mozilla build system.
 The cubeb git repository is: git://github.com/kinetiknz/cubeb.git
-The git commit ID used was e92a27c96c0efd33acf983e4c873376ff4cae3d8.
+The git commit ID used was a137c78c4ba1a6b0ebde5548664bdf6390ef5437.


@@ -88,7 +88,6 @@ extern cubeb_ops const wasapi_ops;
 struct cubeb
 {
   cubeb_ops const * ops;
-  IMMDevice * device;
   /* Library dynamically opened to increase the render
    * thread priority, and the two function pointers we need. */
   HMODULE mmcss_module;
@@ -400,7 +399,7 @@ wasapi_stream_render_loop(LPVOID stream)
     default:
       LOG("case %d not handled in render loop.", waitResult);
       abort();
-    };
+    }
   }
   if (FAILED(hr)) {
@@ -424,13 +423,38 @@ BOOL WINAPI revert_mm_thread_characteristics_noop(HANDLE mmcss_handle)
 {
   return true;
 }
+HRESULT get_default_endpoint(IMMDevice ** device)
+{
+  IMMDeviceEnumerator * enumerator;
+  HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
+                                NULL, CLSCTX_INPROC_SERVER,
+                                IID_PPV_ARGS(&enumerator));
+  if (FAILED(hr)) {
+    LOG("Could not get device enumerator.");
+    return hr;
+  }
+  /* eMultimedia is okay for now ("Music, movies, narration, [...]").
+   * We will need to change this when we distinguish streams by use-case, other
+   * possible values being eConsole ("Games, system notification sounds [...]")
+   * and eCommunication ("Voice communication"). */
+  hr = enumerator->GetDefaultAudioEndpoint(eRender, eMultimedia, device);
+  if (FAILED(hr)) {
+    LOG("Could not get default audio endpoint.");
+    SafeRelease(enumerator);
+    return hr;
+  }
+  SafeRelease(enumerator);
+  return ERROR_SUCCESS;
+}
 } // namespace anonymous
 extern "C" {
 int wasapi_init(cubeb ** context, char const * context_name)
 {
   HRESULT hr;
-  IMMDeviceEnumerator * enumerator = NULL;
   hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
   if (FAILED(hr)) {
@@ -438,33 +462,21 @@ int wasapi_init(cubeb ** context, char const * context_name)
     return CUBEB_ERROR;
   }
+  /* We don't use the device yet, but need to make sure we can initialize one
+     so that this backend is not incorrectly enabled on platforms that don't
+     support WASAPI. */
+  IMMDevice * device;
+  hr = get_default_endpoint(&device);
+  if (FAILED(hr)) {
+    LOG("Could not get device.");
+    return CUBEB_ERROR;
+  }
+  SafeRelease(device);
   cubeb * ctx = (cubeb *)calloc(1, sizeof(cubeb));
   ctx->ops = &wasapi_ops;
-  hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
-                        NULL, CLSCTX_INPROC_SERVER,
-                        IID_PPV_ARGS(&enumerator));
-  if (FAILED(hr)) {
-    LOG("Could not get device enumerator.");
-    wasapi_destroy(ctx);
-    return CUBEB_ERROR;
-  }
-  /* eMultimedia is okay for now ("Music, movies, narration, [...]").
-   * We will need to change this when we distinguish streams by use-case, other
-   * possible values being eConsole ("Games, system notification sounds [...]")
-   * and eCommunication ("Voice communication"). */
-  hr = enumerator->GetDefaultAudioEndpoint(eRender,
-                                           eMultimedia,
-                                           &ctx->device);
-  if (FAILED(hr)) {
-    LOG("Could not get default audio endpoint.");
-    SafeRelease(enumerator);
-    wasapi_destroy(ctx);
-    return CUBEB_ERROR;
-  }
   ctx->mmcss_module = LoadLibraryA("Avrt.dll");
   if (ctx->mmcss_module) {
@@ -485,8 +497,6 @@ int wasapi_init(cubeb ** context, char const * context_name)
     ctx->set_mm_thread_characteristics = &set_mm_thread_characteristics_noop;
     ctx->revert_mm_thread_characteristics = &revert_mm_thread_characteristics_noop;
   }
-  SafeRelease(enumerator);
   *context = ctx;
@@ -498,7 +508,6 @@ namespace {
 void wasapi_destroy(cubeb * context)
 {
-  SafeRelease(context->device);
   if (context->mmcss_module) {
     FreeLibrary(context->mmcss_module);
   }
@@ -513,21 +522,28 @@ char const* wasapi_get_backend_id(cubeb * context)
 int
 wasapi_get_max_channel_count(cubeb * ctx, uint32_t * max_channels)
 {
-  HRESULT hr;
   IAudioClient * client;
   WAVEFORMATEX * mix_format;
   assert(ctx && max_channels);
-  hr = ctx->device->Activate(__uuidof(IAudioClient),
-                             CLSCTX_INPROC_SERVER,
-                             NULL, (void **)&client);
+  IMMDevice * device;
+  HRESULT hr = get_default_endpoint(&device);
+  if (FAILED(hr)) {
+    return CUBEB_ERROR;
+  }
+  hr = device->Activate(__uuidof(IAudioClient),
+                        CLSCTX_INPROC_SERVER,
+                        NULL, (void **)&client);
+  SafeRelease(device);
   if (FAILED(hr)) {
     return CUBEB_ERROR;
   }
   hr = client->GetMixFormat(&mix_format);
   if (FAILED(hr)) {
     SafeRelease(client);
     return CUBEB_ERROR;
   }
@@ -546,16 +562,26 @@ wasapi_get_min_latency(cubeb * ctx, cubeb_stream_params params, uint32_t * laten
   IAudioClient * client;
   REFERENCE_TIME default_period;
-  hr = ctx->device->Activate(__uuidof(IAudioClient),
-                             CLSCTX_INPROC_SERVER,
-                             NULL, (void **)&client);
+  IMMDevice * device;
+  hr = get_default_endpoint(&device);
+  if (FAILED(hr)) {
+    return CUBEB_ERROR;
+  }
+  hr = device->Activate(__uuidof(IAudioClient),
+                        CLSCTX_INPROC_SERVER,
+                        NULL, (void **)&client);
+  SafeRelease(device);
   if (FAILED(hr)) {
     return CUBEB_ERROR;
   }
   /* The second parameter is for exclusive mode, that we don't use. */
-  hr= client->GetDevicePeriod(&default_period, NULL);
+  hr = client->GetDevicePeriod(&default_period, NULL);
   if (FAILED(hr)) {
     SafeRelease(client);
     return CUBEB_ERROR;
   }
   /* According to the docs, the best latency we can achieve is by synchronizing
    * the stream and the engine.
@@ -574,16 +600,21 @@ wasapi_get_preferred_sample_rate(cubeb * ctx, uint32_t * rate)
   IAudioClient * client;
   WAVEFORMATEX * mix_format;
-  hr = ctx->device->Activate(__uuidof(IAudioClient),
-                             CLSCTX_INPROC_SERVER,
-                             NULL, (void **)&client);
+  IMMDevice * device;
+  hr = get_default_endpoint(&device);
+  if (FAILED(hr)) {
+    return CUBEB_ERROR;
+  }
+  hr = device->Activate(__uuidof(IAudioClient),
+                        CLSCTX_INPROC_SERVER,
+                        NULL, (void **)&client);
+  SafeRelease(device);
   if (FAILED(hr)) {
     return CUBEB_ERROR;
   }
   hr = client->GetMixFormat(&mix_format);
   if (FAILED(hr)) {
     SafeRelease(client);
     return CUBEB_ERROR;
@@ -711,11 +742,19 @@ wasapi_stream_init(cubeb * context, cubeb_stream ** stream,
     return CUBEB_ERROR;
   }
+  IMMDevice * device;
+  hr = get_default_endpoint(&device);
+  if (FAILED(hr)) {
+    wasapi_stream_destroy(stm);
+    return CUBEB_ERROR;
+  }
   /* Get a client. We will get all other interfaces we need from
    * this pointer. */
-  hr = context->device->Activate(__uuidof(IAudioClient),
-                                 CLSCTX_INPROC_SERVER,
-                                 NULL, (void **)&stm->client);
+  hr = device->Activate(__uuidof(IAudioClient),
+                        CLSCTX_INPROC_SERVER,
+                        NULL, (void **)&stm->client);
+  SafeRelease(device);
   if (FAILED(hr)) {
     LOG("Could not activate the device to get an audio client.");
     wasapi_stream_destroy(stm);
@@ -724,7 +763,12 @@ wasapi_stream_init(cubeb * context, cubeb_stream ** stream,
   /* We have to distinguish between the format the mixer uses,
    * and the format the stream we want to play uses. */
-  stm->client->GetMixFormat(&mix_format);
+  hr = stm->client->GetMixFormat(&mix_format);
+  if (FAILED(hr)) {
+    LOG("Could not fetch current mix format from the audio client.");
+    wasapi_stream_destroy(stm);
+    return CUBEB_ERROR;
+  }
   handle_channel_layout(stm, &mix_format, &stream_params);
@@ -810,7 +854,7 @@ wasapi_stream_init(cubeb * context, cubeb_stream ** stream,
   }
   hr = stm->client->GetService(__uuidof(IAudioRenderClient),
-                              (void **)&stm->render_client);
+                               (void **)&stm->render_client);
   if (FAILED(hr)) {
     LOG("Could not get the render client.");
     wasapi_stream_destroy(stm);
@@ -818,7 +862,7 @@ wasapi_stream_init(cubeb * context, cubeb_stream ** stream,
   }
   hr = stm->client->GetService(__uuidof(IAudioClock),
-                              (void **)&stm->audio_clock);
+                               (void **)&stm->audio_clock);
   if (FAILED(hr)) {
     LOG("Could not get the IAudioClock.");
     wasapi_stream_destroy(stm);
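
SafeRelease, used throughout the hunks above, is a small helper defined elsewhere in the WASAPI backend and is not part of this diff. Its usual shape is the standard COM idiom, roughly as follows (an assumption about the helper, not a quote from the file):

/* Assumed shape of the SafeRelease helper: release a COM interface pointer
 * only when it is non-null, so call sites can release unconditionally. */
template <typename T>
void SafeRelease(T * ptr)
{
  if (ptr) {
    ptr->Release();
  }
}

Since get_default_endpoint hands back a referenced IMMDevice, each call site above pairs the lookup with a SafeRelease(device) as soon as the needed interface has been obtained.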