Bug 987979: Patch 2 - Rollup of changes previously applied to media/webrtc/trunk/webrtc rs=jesup

Randell Jesup 2014-05-29 17:05:14 -04:00
parent 0654f9ad2b
commit 21318d2311
191 changed files with 3140 additions and 1957 deletions

View File

@ -43,6 +43,8 @@
#include "webrtc/voice_engine/include/voe_call_report.h"
// Video Engine
// conflicts with #include of scoped_ptr.h
#undef FF
#include "webrtc/video_engine/include/vie_base.h"
#include "webrtc/video_engine/include/vie_codec.h"
#include "webrtc/video_engine/include/vie_render.h"

View File

@ -751,7 +751,8 @@ WebrtcAudioConduit::ReceivedRTPPacket(const void *data, int len)
}
#endif
if(mPtrVoENetwork->ReceivedRTPPacket(mChannel,data,len) == -1)
// XXX we need to be passed the time the packet was received
if(mPtrVoENetwork->ReceivedRTPPacket(mChannel, data, len) == -1)
{
int error = mPtrVoEBase->LastError();
CSFLogError(logTag, "%s RTP Processing Error %d", __FUNCTION__, error);

View File

@ -18,6 +18,7 @@
#include "nsIPrefService.h"
#include "nsIPrefBranch.h"
#include "webrtc/common_types.h"
#include "webrtc/common_video/interface/native_handle.h"
#include "webrtc/video_engine/include/vie_errors.h"
@ -1025,7 +1026,8 @@ WebrtcVideoConduit::ReceivedRTPPacket(const void *data, int len)
if(mEngineReceiving)
{
// let the engine know of a RTP packet to decode
if(mPtrViENetwork->ReceivedRTPPacket(mChannel,data,len) == -1)
// XXX we need to be passed the time the packet was received
if(mPtrViENetwork->ReceivedRTPPacket(mChannel, data, len, webrtc::PacketTime()) == -1)
{
int error = mPtrViEBase->LastError();
CSFLogError(logTag, "%s RTP Processing Failed %d ", __FUNCTION__, error);
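
The upstream ViENetwork::ReceivedRTPPacket() now takes a webrtc::PacketTime, so the conduit passes a default-constructed one until the real socket receive time is plumbed through (hence the XXX above). A minimal sketch of the call shape, assuming that overload; ForwardRtp is a hypothetical helper, not part of the patch:

void ForwardRtp(webrtc::ViENetwork* network, int channel,
                const void* data, int len) {
  // A default PacketTime() marks the receive timestamp as unknown.
  if (network->ReceivedRTPPacket(channel, data, len,
                                 webrtc::PacketTime()) == -1) {
    // On failure, query LastError() on the base engine and log, as above.
  }
}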

View File

@ -11,6 +11,8 @@
#include "MediaConduitInterface.h"
#include "MediaEngineWrapper.h"
// conflicts with #include of scoped_ptr.h
#undef FF
// Video Engine Includes
#include "webrtc/common_types.h"
#ifdef FF

View File

@ -23,8 +23,30 @@
'cflags!': [
'-mfpu=vfpv3-d16',
],
'cflags_mozilla!': [
'-mfpu=vfpv3-d16',
],
'cflags': [
'-mfpu=neon',
'-flax-vector-conversions',
],
'cflags_mozilla': [
'-mfpu=neon',
'-flax-vector-conversions',
],
'asflags!': [
'-mfpu=vfpv3-d16',
],
'asflags_mozilla!': [
'-mfpu=vfpv3-d16',
],
'asflags': [
'-mfpu=neon',
'-flax-vector-conversions',
],
'asflags_mozilla': [
'-mfpu=neon',
'-flax-vector-conversions',
],
}

View File

@ -42,8 +42,14 @@
'modules_java_gyp_path%': '<(modules_java_gyp_path)',
'gen_core_neon_offsets_gyp%': '<(gen_core_neon_offsets_gyp)',
'webrtc_vp8_dir%': '<(webrtc_root)/modules/video_coding/codecs/vp8',
'webrtc_h264_dir%': '<(webrtc_root)/modules/video_coding/codecs/h264',
'rbe_components_path%': '<(webrtc_root)/modules/remote_bitrate_estimator',
'include_g711%': 1,
'include_g722%': 1,
'include_ilbc%': 1,
'include_opus%': 1,
'include_isac%': 1,
'include_pcm16b%': 1,
},
'build_with_chromium%': '<(build_with_chromium)',
'build_with_libjingle%': '<(build_with_libjingle)',
@ -52,7 +58,15 @@
'modules_java_gyp_path%': '<(modules_java_gyp_path)',
'gen_core_neon_offsets_gyp%': '<(gen_core_neon_offsets_gyp)',
'webrtc_vp8_dir%': '<(webrtc_vp8_dir)',
'webrtc_h264_dir%': '<(webrtc_h264_dir)',
'include_g711%': '<(include_g711)',
'include_g722%': '<(include_g722)',
'include_ilbc%': '<(include_ilbc)',
'include_opus%': '<(include_opus)',
'include_isac%': '<(include_isac)',
'include_pcm16b%': '<(include_pcm16b)',
'rbe_components_path%': '<(rbe_components_path)',
# The Chromium common.gypi we use treats all gyp files without
@ -112,6 +126,9 @@
# Exclude internal video render module in Chromium build.
'include_internal_video_render%': 0,
# lazily allocate the ~4MB of trace message buffers if set
'enable_lazy_trace_alloc%': 0,
}, { # Settings for the standalone (not-in-Chromium) build.
# TODO(andrew): For now, disable the Chrome plugins, which causes a
# flood of chromium-style warnings. Investigate enabling them:
@ -130,6 +147,21 @@
'include_tests%': 1,
'restrict_webrtc_logging%': 0,
}],
['OS=="linux"', {
'include_alsa_audio%': 1,
}, {
'include_alsa_audio%': 0,
}],
['OS=="solaris" or os_bsd==1', {
'include_pulse_audio%': 1,
}, {
'include_pulse_audio%': 0,
}],
['OS=="linux" or OS=="solaris" or os_bsd==1', {
'include_v4l2_video_capture%': 1,
}, {
'include_v4l2_video_capture%': 0,
}],
['OS=="ios"', {
'build_libjpeg%': 0,
'enable_protobuf%': 0,
@ -150,6 +182,11 @@
'<(DEPTH)',
],
'conditions': [
['moz_widget_toolkit_gonk==1', {
'defines' : [
'WEBRTC_GONK',
],
}],
['restrict_webrtc_logging==1', {
'defines': ['WEBRTC_RESTRICT_LOGGING',],
}],
@ -197,7 +234,8 @@
],
'conditions': [
['arm_version==7', {
'defines': ['WEBRTC_ARCH_ARM_V7',],
'defines': ['WEBRTC_ARCH_ARM_V7',
'WEBRTC_BUILD_NEON_LIBS'],
'conditions': [
['arm_neon==1', {
'defines': ['WEBRTC_ARCH_ARM_NEON',],
@ -208,6 +246,19 @@
}],
],
}],
['os_bsd==1', {
'defines': [
'WEBRTC_BSD',
'WEBRTC_THREAD_RR',
],
}],
['OS=="dragonfly" or OS=="netbsd"', {
'defines': [
# doesn't support pthread_condattr_setclock
'WEBRTC_CLOCK_TYPE_REALTIME',
],
}],
# Mozilla: if we support Mozilla on MIPS, we'll need to mod the cflags entries here
['target_arch=="mipsel"', {
'defines': [
'MIPS32_LE',
@ -268,6 +319,13 @@
],
}],
['OS=="linux"', {
# 'conditions': [
# ['have_clock_monotonic==1', {
# 'defines': [
# 'WEBRTC_CLOCK_TYPE_REALTIME',
# ],
# }],
# ],
'defines': [
'WEBRTC_LINUX',
],
@ -291,17 +349,18 @@
# Re-enable some warnings that Chromium disables.
'msvs_disabled_warnings!': [4189,],
}],
# used on GONK as well
['enable_android_opensl==1 and (OS=="android" or moz_widget_toolkit_gonk==1)', {
'defines': [
'WEBRTC_ANDROID_OPENSLES',
],
}],
['OS=="android"', {
'defines': [
'WEBRTC_LINUX',
'WEBRTC_ANDROID',
],
'conditions': [
['enable_android_opensl==1', {
'defines': [
'WEBRTC_ANDROID_OPENSLES',
],
}],
['clang!=1', {
# The Android NDK doesn't provide optimized versions of these
# functions. Ensure they are disabled for all compilers.

View File

@ -44,5 +44,7 @@
},
],
},
# }],
# ],
],
}

View File

@ -155,6 +155,7 @@
'resampler/sinc_resampler_sse.cc',
],
'cflags': ['-msse2',],
'cflags_mozilla': ['-msse2',],
'xcode_settings': {
'OTHER_CFLAGS': ['-msse2',],
},

View File

@ -17,98 +17,47 @@
#define WEBRTC_RESAMPLER_RESAMPLER_H_
#include "webrtc/typedefs.h"
#include "speex/speex_resampler.h"
namespace webrtc
{
// TODO(andrew): the implementation depends on the exact values of this enum.
// It should be rewritten in a less fragile way.
#define FIXED_RATE_RESAMPLER 0x10
enum ResamplerType
{
// 4 MSB = Number of channels
// 4 LSB = Synchronous or asynchronous
kResamplerSynchronous = 0x10,
kResamplerAsynchronous = 0x11,
kResamplerSynchronousStereo = 0x20,
kResamplerAsynchronousStereo = 0x21,
kResamplerInvalid = 0xff
};
// TODO(andrew): doesn't need to be part of the interface.
enum ResamplerMode
{
kResamplerMode1To1,
kResamplerMode1To2,
kResamplerMode1To3,
kResamplerMode1To4,
kResamplerMode1To6,
kResamplerMode1To12,
kResamplerMode2To3,
kResamplerMode2To11,
kResamplerMode4To11,
kResamplerMode8To11,
kResamplerMode11To16,
kResamplerMode11To32,
kResamplerMode2To1,
kResamplerMode3To1,
kResamplerMode4To1,
kResamplerMode6To1,
kResamplerMode12To1,
kResamplerMode3To2,
kResamplerMode11To2,
kResamplerMode11To4,
kResamplerMode11To8
kResamplerSynchronous = 0x00,
kResamplerSynchronousStereo = 0x01,
kResamplerFixedSynchronous = 0x00 | FIXED_RATE_RESAMPLER,
kResamplerFixedSynchronousStereo = 0x01 | FIXED_RATE_RESAMPLER,
};
class Resampler
{
public:
Resampler();
// TODO(andrew): use an init function instead.
Resampler(int inFreq, int outFreq, ResamplerType type);
Resampler(int in_freq, int out_freq, ResamplerType type);
~Resampler();
// Reset all states
int Reset(int inFreq, int outFreq, ResamplerType type);
int Reset(int in_freq, int out_freq, ResamplerType type);
// Reset all states if any parameter has changed
int ResetIfNeeded(int inFreq, int outFreq, ResamplerType type);
int ResetIfNeeded(int in_freq, int out_freq, ResamplerType type);
// Synchronous resampling, all output samples are written to samplesOut
int Push(const int16_t* samplesIn, int lengthIn, int16_t* samplesOut,
int maxLen, int &outLen);
// Asynchronous resampling, input
int Insert(int16_t* samplesIn, int lengthIn);
// Asynchronous resampling output, remaining samples are buffered
int Pull(int16_t* samplesOut, int desiredLen, int &outLen);
int Push(const int16_t* samples_in, int length_in,
int16_t* samples_out, int max_len, int &out_len);
private:
// Generic pointers since we don't know what states we'll need
void* state1_;
void* state2_;
void* state3_;
bool IsFixedRate() { return !!(type_ & FIXED_RATE_RESAMPLER); }
// Storage if needed
int16_t* in_buffer_;
int16_t* out_buffer_;
int in_buffer_size_;
int out_buffer_size_;
int in_buffer_size_max_;
int out_buffer_size_max_;
SpeexResamplerState* state_;
// State
int my_in_frequency_khz_;
int my_out_frequency_khz_;
ResamplerMode my_mode_;
ResamplerType my_type_;
// Extra instance for stereo
Resampler* slave_left_;
Resampler* slave_right_;
int in_freq_;
int out_freq_;
int channels_;
ResamplerType type_;
};
} // namespace webrtc
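
The rewritten interface drops the asynchronous Insert/Pull paths and the per-ratio mode table in favor of arbitrary-rate Speex resampling. A minimal usage sketch against the new API (one 10 ms mono frame; the rates and buffer sizes are illustrative):

#include "webrtc/common_audio/resampler/include/resampler.h"

void ResampleTenMs(const int16_t in[441] /* 10 ms @ 44100 Hz */) {
  webrtc::Resampler rs(44100, 48000, webrtc::kResamplerSynchronous);
  int16_t out[480];  // room for 10 ms @ 48000 Hz
  int out_len = 0;
  if (rs.Push(in, 441, out, 480, out_len) == 0) {
    // out_len now holds the produced sample count (~480).
  }
}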

View File

@ -13,7 +13,6 @@
#include <string.h>
#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/resampler/include/resampler.h"
#include "webrtc/common_audio/resampler/push_sinc_resampler.h"
namespace webrtc {

File diff suppressed because it is too large

View File

@ -8,6 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include <math.h>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common_audio/resampler/include/resampler.h"
@ -18,10 +20,7 @@ namespace webrtc {
namespace {
const ResamplerType kTypes[] = {
kResamplerSynchronous,
kResamplerAsynchronous,
kResamplerSynchronousStereo,
kResamplerAsynchronousStereo
// kResamplerInvalid excluded
};
const size_t kTypesSize = sizeof(kTypes) / sizeof(*kTypes);
@ -31,7 +30,7 @@ const int kRates[] = {
8000,
16000,
32000,
44000,
44100,
48000,
kMaxRate
};
@ -39,26 +38,19 @@ const size_t kRatesSize = sizeof(kRates) / sizeof(*kRates);
const int kMaxChannels = 2;
const size_t kDataSize = static_cast<size_t> (kMaxChannels * kMaxRate / 100);
// TODO(andrew): should we be supporting these combinations?
bool ValidRates(int in_rate, int out_rate) {
// Not the most compact notation, for clarity.
if ((in_rate == 44000 && (out_rate == 48000 || out_rate == 96000)) ||
(out_rate == 44000 && (in_rate == 48000 || in_rate == 96000))) {
return false;
}
return true;
}
class ResamplerTest : public testing::Test {
protected:
ResamplerTest();
virtual void SetUp();
virtual void TearDown();
void RunResampleTest(int channels,
int src_sample_rate_hz,
int dst_sample_rate_hz);
Resampler rs_;
int16_t data_in_[kDataSize];
int16_t data_out_[kDataSize];
int16_t data_reference_[kDataSize];
};
ResamplerTest::ResamplerTest() {}
@ -83,34 +75,119 @@ TEST_F(ResamplerTest, Reset) {
ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j]
<< ", type: " << kTypes[k];
SCOPED_TRACE(ss.str());
if (ValidRates(kRates[i], kRates[j]))
EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kTypes[k]));
else
EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j], kTypes[k]));
EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kTypes[k]));
}
}
}
}
// TODO(tlegrand): Replace code inside the two tests below with a function
// with number of channels and ResamplerType as input.
TEST_F(ResamplerTest, Synchronous) {
for (size_t i = 0; i < kRatesSize; ++i) {
for (size_t j = 0; j < kRatesSize; ++j) {
std::ostringstream ss;
ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j];
SCOPED_TRACE(ss.str());
// Sets the signal value to increase by |data| with every sample. Floats are
// used so non-integer values result in rounding error, but not an accumulating
// error.
void SetMonoFrame(int16_t* buffer, float data, int sample_rate_hz) {
for (int i = 0; i < sample_rate_hz / 100; i++) {
buffer[i] = data * i;
}
}
if (ValidRates(kRates[i], kRates[j])) {
int in_length = kRates[i] / 100;
int out_length = 0;
EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kResamplerSynchronous));
EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
out_length));
EXPECT_EQ(kRates[j] / 100, out_length);
} else {
EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j], kResamplerSynchronous));
}
// Sets the signal value to increase by |left| and |right| with every sample in
// each channel respectively.
void SetStereoFrame(int16_t* buffer, float left, float right,
int sample_rate_hz) {
for (int i = 0; i < sample_rate_hz / 100; i++) {
buffer[i * 2] = left * i;
buffer[i * 2 + 1] = right * i;
}
}
// Computes the best SNR based on the error between |ref_frame| and
// |test_frame|. It allows for a sample delay between the signals to
// compensate for the resampling delay.
float ComputeSNR(const int16_t* reference, const int16_t* test,
int sample_rate_hz, int channels, int max_delay) {
float best_snr = 0;
int best_delay = 0;
int samples_per_channel = sample_rate_hz/100;
for (int delay = 0; delay < max_delay; delay++) {
float mse = 0;
float variance = 0;
for (int i = 0; i < samples_per_channel * channels - delay; i++) {
int error = reference[i] - test[i + delay];
mse += error * error;
variance += reference[i] * reference[i];
}
float snr = 100; // We assign 100 dB to the zero-error case.
if (mse > 0)
snr = 10 * log10(variance / mse);
if (snr > best_snr) {
best_snr = snr;
best_delay = delay;
}
}
printf("SNR=%.1f dB at delay=%d\n", best_snr, best_delay);
return best_snr;
}
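
In the notation of ComputeSNR() above, for each candidate delay d the test evaluates

  \mathrm{SNR}(d) = 10\log_{10}\frac{\sum_i \mathrm{reference}_i^2}{\sum_i \left(\mathrm{reference}_i - \mathrm{test}_{i+d}\right)^2},
  \qquad \mathrm{best\_snr} = \max_{0 \le d < \mathrm{max\_delay}} \mathrm{SNR}(d)

with the zero-error case pinned to 100 dB.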
void ResamplerTest::RunResampleTest(int channels,
int src_sample_rate_hz,
int dst_sample_rate_hz) {
Resampler resampler; // Create a new one with every test.
const int16_t kSrcLeft = 60; // Shouldn't overflow for any used sample rate.
const int16_t kSrcRight = 30;
const float kResamplingFactor = (1.0 * src_sample_rate_hz) /
dst_sample_rate_hz;
const float kDstLeft = kResamplingFactor * kSrcLeft;
const float kDstRight = kResamplingFactor * kSrcRight;
if (channels == 1)
SetMonoFrame(data_in_, kSrcLeft, src_sample_rate_hz);
else
SetStereoFrame(data_in_, kSrcLeft, kSrcRight, src_sample_rate_hz);
if (channels == 1) {
SetMonoFrame(data_out_, 0, dst_sample_rate_hz);
SetMonoFrame(data_reference_, kDstLeft, dst_sample_rate_hz);
} else {
SetStereoFrame(data_out_, 0, 0, dst_sample_rate_hz);
SetStereoFrame(data_reference_, kDstLeft, kDstRight, dst_sample_rate_hz);
}
// The speex resampler has a known delay dependent on quality and rates,
// which we approximate here. Multiplying by two gives us a crude maximum
// for any resampling, as the old resampler typically (but not always)
// has lower delay. The actual delay is calculated internally based on the
// filter length in the QualityMap.
static const int kInputKernelDelaySamples = 16*3;
const int max_delay = std::min(1.0f, 1/kResamplingFactor) *
kInputKernelDelaySamples * channels * 2;
printf("(%d, %d Hz) -> (%d, %d Hz) ", // SNR reported on the same line later.
channels, src_sample_rate_hz, channels, dst_sample_rate_hz);
int in_length = channels * src_sample_rate_hz / 100;
int out_length = 0;
EXPECT_EQ(0, rs_.Reset(src_sample_rate_hz, dst_sample_rate_hz,
(channels == 1 ?
kResamplerSynchronous :
kResamplerSynchronousStereo)));
EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
out_length));
EXPECT_EQ(channels * dst_sample_rate_hz / 100, out_length);
// EXPECT_EQ(0, Resample(src_frame_, &resampler, &dst_frame_));
EXPECT_GT(ComputeSNR(data_reference_, data_out_, dst_sample_rate_hz,
channels, max_delay), 40.0f);
}
TEST_F(ResamplerTest, Synchronous) {
// Number of channels is 1, mono mode.
const int kChannels = 1;
// We don't attempt to be exhaustive here, but just get good coverage. Some
// combinations of rates will not be resampled, and some give an odd
// resampling factor which makes it more difficult to evaluate.
const int kSampleRates[] = {16000, 32000, 44100, 48000};
const int kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
for (int src_rate = 0; src_rate < kSampleRatesSize; src_rate++) {
for (int dst_rate = 0; dst_rate < kSampleRatesSize; dst_rate++) {
RunResampleTest(kChannels, kSampleRates[src_rate], kSampleRates[dst_rate]);
}
}
}
@ -118,24 +195,14 @@ TEST_F(ResamplerTest, Synchronous) {
TEST_F(ResamplerTest, SynchronousStereo) {
// Number of channels is 2, stereo mode.
const int kChannels = 2;
for (size_t i = 0; i < kRatesSize; ++i) {
for (size_t j = 0; j < kRatesSize; ++j) {
std::ostringstream ss;
ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j];
SCOPED_TRACE(ss.str());
if (ValidRates(kRates[i], kRates[j])) {
int in_length = kChannels * kRates[i] / 100;
int out_length = 0;
EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j],
kResamplerSynchronousStereo));
EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
out_length));
EXPECT_EQ(kChannels * kRates[j] / 100, out_length);
} else {
EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j],
kResamplerSynchronousStereo));
}
// We don't attempt to be exhaustive here, but just get good coverage. Some
// combinations of rates will not be resampled, and some give an odd
// resampling factor which makes it more difficult to evaluate.
const int kSampleRates[] = {16000, 32000, 44100, 48000};
const int kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
for (int src_rate = 0; src_rate < kSampleRatesSize; src_rate++) {
for (int dst_rate = 0; dst_rate < kSampleRatesSize; dst_rate++) {
RunResampleTest(kChannels, kSampleRates[src_rate], kSampleRates[dst_rate]);
}
}
}

View File

@ -26,11 +26,11 @@ float SincResampler::Convolve_NEON(const float* input_ptr, const float* k1,
const float* upper = input_ptr + kKernelSize;
for (; input_ptr < upper; ) {
m_input = vld1q_f32(input_ptr);
m_input = vld1q_f32((const float32_t *) input_ptr);
input_ptr += 4;
m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32(k1));
m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32((const float32_t *) k1));
k1 += 4;
m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32(k2));
m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32((const float32_t *) k2));
k2 += 4;
}
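
Context for the casts above: on some GCC/NDK combinations vld1q_f32() is declared as taking const float32_t*, and float32_t is not guaranteed to be the same type as float, so the pointers are cast at each call site. A sketch of the same load wrapped once (illustrative helper, not in the patch):

#include <arm_neon.h>

static inline float32x4_t LoadFloat4(const float* p) {
  // Bridge float* to the float32_t* that this toolchain's intrinsic expects.
  return vld1q_f32(reinterpret_cast<const float32_t*>(p));
}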

View File

@ -11,6 +11,7 @@
#ifndef WEBRTC_COMMON_TYPES_H_
#define WEBRTC_COMMON_TYPES_H_
#include <stddef.h> // size_t
#include "webrtc/typedefs.h"
#if defined(_MSC_VER)
@ -434,7 +435,7 @@ enum NsModes // type of Noise Suppression
kNsLowSuppression, // lowest suppression
kNsModerateSuppression,
kNsHighSuppression,
kNsVeryHighSuppression, // highest suppression
kNsVeryHighSuppression // highest suppression
};
enum AgcModes // type of Automatic Gain Control
@ -459,7 +460,7 @@ enum EcModes // type of Echo Control
kEcDefault, // platform default
kEcConference, // conferencing default (aggressive AEC)
kEcAec, // Acoustic Echo Cancellation
kEcAecm, // AEC mobile
kEcAecm // AEC mobile
};
// AECM modes
@ -511,7 +512,7 @@ enum NetEqModes // NetEQ playout configurations
kNetEqFax = 2,
// Minimal buffer management. Inserts zeros for lost packets and during
// buffer increases.
kNetEqOff = 3,
kNetEqOff = 3
};
enum OnHoldModes // On Hold direction
@ -525,7 +526,7 @@ enum AmrMode
{
kRfc3267BwEfficient = 0,
kRfc3267OctetAligned = 1,
kRfc3267FileStorage = 2,
kRfc3267FileStorage = 2
};
// ==================================================================
@ -598,10 +599,27 @@ struct VideoCodecVP8
int keyFrameInterval;
};
// H264 specific
struct VideoCodecH264
{
uint8_t profile;
uint8_t constraints;
uint8_t level;
uint8_t packetizationMode; // 0 or 1
bool frameDroppingOn;
int keyFrameInterval;
// These are null/0 if not externally negotiated
const uint8_t* spsData;
size_t spsLen;
const uint8_t* ppsData;
size_t ppsLen;
};
// Video codec types
enum VideoCodecType
{
kVideoCodecVP8,
kVideoCodecH264,
kVideoCodecI420,
kVideoCodecRED,
kVideoCodecULPFEC,
@ -612,6 +630,7 @@ enum VideoCodecType
union VideoCodecUnion
{
VideoCodecVP8 VP8;
VideoCodecH264 H264;
};
@ -688,6 +707,25 @@ struct OverUseDetectorOptions {
double initial_threshold;
};
enum CPULoadState {
kLoadRelaxed,
kLoadNormal,
kLoadStressed
};
class CPULoadStateObserver {
public:
virtual void onLoadStateChanged(CPULoadState aNewState) = 0;
virtual ~CPULoadStateObserver() {};
};
class CPULoadStateCallbackInvoker {
public:
virtual void AddObserver(CPULoadStateObserver* aObserver) = 0;
virtual void RemoveObserver(CPULoadStateObserver* aObserver) = 0;
virtual ~CPULoadStateCallbackInvoker() {};
};
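
A hypothetical consumer of the new CPU-load hooks; the class name and the reaction are illustrative, only the two interfaces come from the patch:

class EncoderLoadObserver : public webrtc::CPULoadStateObserver {
 public:
  virtual void onLoadStateChanged(webrtc::CPULoadState aNewState) {
    if (aNewState == webrtc::kLoadStressed) {
      // e.g. ask the encoder to drop resolution or frame rate.
    }
  }
  virtual ~EncoderLoadObserver() {}
};
// Whatever implements CPULoadStateCallbackInvoker then delivers state
// changes to every observer registered through AddObserver().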
// This structure will have the information about when packet is actually
// received by socket.
struct PacketTime {

View File

@ -35,7 +35,9 @@
#define WEBRTC_CODEC_AVT
// PCM16 is useful for testing and incurs only a small binary size cost.
#ifndef WEBRTC_CODEC_PCM16
#define WEBRTC_CODEC_PCM16
#endif
// iLBC, G.722, and Redundancy coding are excluded from Chromium and Mozilla
// builds to reduce binary size.

View File

@ -15,7 +15,7 @@
['build_with_mozilla==1', {
# Mozilla provides its own build of the opus library.
'include_dirs': [
'$(DIST)/include/opus',
'/media/libopus/include',
]
}, {
'dependencies': [

View File

@ -15,10 +15,6 @@
#include "typedefs.h"
#ifdef WEBRTC_ARCH_BIG_ENDIAN
#include "signal_processing_library.h"
#endif
#define HIGHEND 0xFF00
#define LOWEND 0xFF
@ -30,7 +26,7 @@ int16_t WebRtcPcm16b_EncodeW16(int16_t *speechIn16b,
int16_t *speechOut16b)
{
#ifdef WEBRTC_ARCH_BIG_ENDIAN
WEBRTC_SPL_MEMCPY_W16(speechOut16b, speechIn16b, len);
memcpy(speechOut16b, speechIn16b, len * sizeof(int16_t));
#else
int i;
for (i=0;i<len;i++) {
@ -69,7 +65,7 @@ int16_t WebRtcPcm16b_DecodeW16(void *inst,
int16_t* speechType)
{
#ifdef WEBRTC_ARCH_BIG_ENDIAN
WEBRTC_SPL_MEMCPY_W8(speechOut16b, speechIn16b, ((len*sizeof(int16_t)+1)>>1));
memcpy(speechOut16b, speechIn16b, ((len*sizeof(int16_t)+1)>>1));
#else
int i;
int samples=len>>1;

View File

@ -46,12 +46,6 @@
'acm_common_defs.h',
'acm_dtmf_playout.cc',
'acm_dtmf_playout.h',
'acm_g722.cc',
'acm_g722.h',
'acm_g7221.cc',
'acm_g7221.h',
'acm_g7221c.cc',
'acm_g7221c.h',
'acm_g729.cc',
'acm_g729.h',
'acm_g7291.cc',
@ -60,11 +54,6 @@
'acm_generic_codec.h',
'acm_gsmfr.cc',
'acm_gsmfr.h',
'acm_ilbc.cc',
'acm_ilbc.h',
'acm_isac.cc',
'acm_isac.h',
'acm_isac_macros.h',
'acm_opus.cc',
'acm_opus.h',
'acm_speex.cc',

View File

@ -10,12 +10,6 @@
'variables': {
'audio_coding_dependencies': [
'CNG',
'G711',
'G722',
'iLBC',
'iSAC',
'iSACFix',
'PCM16B',
'NetEq',
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
@ -26,6 +20,26 @@
'audio_coding_dependencies': ['webrtc_opus',],
'audio_coding_defines': ['WEBRTC_CODEC_OPUS',],
}],
['include_g711==1', {
'audio_coding_dependencies': ['G711',],
'audio_coding_defines': ['WEBRTC_CODEC_G711',],
}],
['include_g722==1', {
'audio_coding_dependencies': ['G722',],
'audio_coding_defines': ['WEBRTC_CODEC_G722',],
}],
['include_ilbc==1', {
'audio_coding_dependencies': ['iLBC',],
'audio_coding_defines': ['WEBRTC_CODEC_ILBC',],
}],
['include_isac==1', {
'audio_coding_dependencies': ['iSAC', 'iSACFix',],
'audio_coding_defines': ['WEBRTC_CODEC_ISAC', 'WEBRTC_CODEC_ISACFX',],
}],
['include_pcm16b==1', {
'audio_coding_dependencies': ['PCM16B',],
'audio_coding_defines': ['WEBRTC_CODEC_PCM16',],
}],
],
},
'targets': [
@ -54,12 +68,6 @@
'sources': [
'../interface/audio_coding_module.h',
'../interface/audio_coding_module_typedefs.h',
'acm_amr.cc',
'acm_amr.h',
'acm_amrwb.cc',
'acm_amrwb.h',
'acm_celt.cc',
'acm_celt.h',
'acm_cng.cc',
'acm_cng.h',
'acm_codec_database.cc',
@ -68,37 +76,10 @@
'acm_dtmf_detection.h',
'acm_dtmf_playout.cc',
'acm_dtmf_playout.h',
'acm_g722.cc',
'acm_g722.h',
'acm_g7221.cc',
'acm_g7221.h',
'acm_g7221c.cc',
'acm_g7221c.h',
'acm_g729.cc',
'acm_g729.h',
'acm_g7291.cc',
'acm_g7291.h',
'acm_generic_codec.cc',
'acm_generic_codec.h',
'acm_gsmfr.cc',
'acm_gsmfr.h',
'acm_ilbc.cc',
'acm_ilbc.h',
'acm_isac.cc',
'acm_isac.h',
'acm_isac_macros.h',
'acm_neteq.cc',
'acm_neteq.h',
'acm_opus.cc',
'acm_opus.h',
'acm_speex.cc',
'acm_speex.h',
'acm_pcm16b.cc',
'acm_pcm16b.h',
'acm_pcma.cc',
'acm_pcma.h',
'acm_pcmu.cc',
'acm_pcmu.h',
'acm_red.cc',
'acm_red.h',
'acm_resampler.cc',
@ -106,6 +87,51 @@
'audio_coding_module_impl.cc',
'audio_coding_module_impl.h',
],
'conditions': [
['include_opus==1', {
'sources': [
'acm_opus.cc',
'acm_opus.h',
],
}],
['include_g711==1', {
'sources': [
'acm_pcma.cc',
'acm_pcma.h',
'acm_pcmu.cc',
'acm_pcmu.h',
],
}],
['include_g722==1', {
'sources': [
'acm_g722.cc',
'acm_g722.h',
'acm_g7221.cc',
'acm_g7221.h',
'acm_g7221c.cc',
'acm_g7221c.h',
],
}],
['include_ilbc==1', {
'sources': [
'acm_ilbc.cc',
'acm_ilbc.h',
],
}],
['include_isac==1', {
'sources': [
'acm_isac.cc',
'acm_isac.h',
'acm_isac_macros.h',
],
}],
['include_pcm16b==1', {
'sources': [
'acm_pcm16b.cc',
'acm_pcm16b.h',
],
}],
],
},
],
'conditions': [

View File

@ -69,6 +69,8 @@
* decoded signal is at 32 kHz.
* NETEQ_ISAC_FB_CODEC Enable iSAC-FB
*
* NETEQ_OPUS_CODEC Enable Opus
*
* NETEQ_G722_CODEC Enable G.722
*
* NETEQ_G729_CODEC Enable G.729
@ -304,6 +306,9 @@
#define NETEQ_G722_1C_CODEC
#define NETEQ_CELT_CODEC
/* hack in 48 kHz support */
#define NETEQ_48KHZ_WIDEBAND
/* Fullband 48 kHz codecs */
#define NETEQ_OPUS_CODEC
#define NETEQ_ISAC_FB_CODEC

View File

@ -678,6 +678,11 @@ int WebRtcNetEQ_GetDefaultCodecSettings(const enum WebRtcNetEQDecoder *codecID,
codecBytes = 15300; /* 240ms @ 510kbps (60ms frames) */
codecBuffers = 30; /* Replicating the value for PCMu/a */
}
else if (codecID[i] == kDecoderOpus)
{
codecBytes = 15300; /* 240ms @ 510kbps (60ms frames) */
codecBuffers = 30; /* ?? Codec supports down to 2.5-60 ms frames */
}
else if ((codecID[i] == kDecoderPCM16B) ||
(codecID[i] == kDecoderPCM16B_2ch))
{
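
The 15300 figure in the Opus branch mirrors the CELT case above and falls out of buffering 240 ms at Opus's 510 kbps maximum bitrate:

  \mathrm{codecBytes} = \frac{510\,000\ \text{bit/s} \times 0.240\ \text{s}}{8\ \text{bit/byte}} = 15\,300\ \text{bytes}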

View File

@ -10,17 +10,25 @@
'variables': {
'neteq_dependencies': [
'G711',
'G722',
'PCM16B',
'iLBC',
'iSAC',
'iSACFix',
'CNG',
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
],
'neteq_defines': [],
'conditions': [
['include_g722==1', {
'neteq_dependencies': ['G722'],
'neteq_defines': ['WEBRTC_CODEC_G722',],
}],
['include_ilbc==1', {
'neteq_dependencies': ['iLBC'],
'neteq_defines': ['WEBRTC_CODEC_ILBC',],
}],
['include_isac==1', {
'neteq_dependencies': ['iSAC', 'iSACFix',],
'neteq_defines': ['WEBRTC_CODEC_ISAC', 'WEBRTC_CODEC_ISACFIX',],
}],
['include_opus==1', {
'neteq_dependencies': ['webrtc_opus',],
'neteq_defines': ['WEBRTC_CODEC_OPUS',],
@ -129,6 +137,7 @@
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
'<(webrtc_root)/test/test.gyp:test_support_main',
],
# FIX for include_isac/etc
'defines': [
'AUDIO_DECODER_UNITTEST',
'WEBRTC_CODEC_G722',

View File

@ -12,6 +12,7 @@
#include <assert.h>
#include "AndroidJNIWrapper.h"
#include "webrtc/modules/utility/interface/helpers_android.h"
#include "webrtc/system_wrappers/interface/trace.h"
@ -51,20 +52,10 @@ void AudioManagerJni::SetAndroidAudioDeviceObjects(void* jvm, void* env,
g_jni_env_ = reinterpret_cast<JNIEnv*>(env);
g_context_ = g_jni_env_->NewGlobalRef(reinterpret_cast<jobject>(context));
// FindClass must be made in this function since this function's contract
// requires it to be called by a Java thread.
// See
// http://developer.android.com/training/articles/perf-jni.html#faq_FindClass
// as to why this is necessary.
// Get the AudioManagerAndroid class object.
jclass javaAmClassLocal = g_jni_env_->FindClass(
"org/webrtc/voiceengine/AudioManagerAndroid");
assert(javaAmClassLocal);
// Create a global reference such that the class object is not recycled by
// the garbage collector.
g_audio_manager_class_ = reinterpret_cast<jclass>(
g_jni_env_->NewGlobalRef(javaAmClassLocal));
g_audio_manager_class_ = jsjni_GetGlobalClassRef(
"org/webrtc/voiceengine/AudioManagerAndroid");
assert(g_audio_manager_class_);
}

View File

@ -34,6 +34,7 @@ class AudioManagerJni {
// It has to be called for this class' APIs to be successful. Calling
// ClearAndroidAudioDeviceObjects will prevent this class' APIs to be called
// successfully if SetAndroidAudioDeviceObjects is not called after it.
static void SetAndroidAudioDeviceObjects(void* jvm, void* context);
static void SetAndroidAudioDeviceObjects(void* jvm, void* env,
void* context);
// This function must be called when the AudioManagerJni class is no

View File

@ -122,7 +122,7 @@ AudioRecordJni::AudioRecordJni(
_recError(0),
_delayRecording(0),
_AGC(false),
_samplingFreqIn((N_REC_SAMPLES_PER_SEC/1000)),
_samplingFreqIn((N_REC_SAMPLES_PER_SEC)),
_recAudioSource(1) { // 1 is AudioSource.MIC which is our default
memset(_recBuffer, 0, sizeof(_recBuffer));
}
@ -419,17 +419,11 @@ int32_t AudioRecordJni::InitRecording() {
jmethodID initRecordingID = env->GetMethodID(_javaScClass, "InitRecording",
"(II)I");
int samplingFreq = 44100;
if (_samplingFreqIn != 44)
{
samplingFreq = _samplingFreqIn * 1000;
}
int retVal = -1;
// call java sc object method
jint res = env->CallIntMethod(_javaScObj, initRecordingID, _recAudioSource,
samplingFreq);
_samplingFreqIn);
if (res < 0)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
@ -438,7 +432,7 @@ int32_t AudioRecordJni::InitRecording() {
else
{
// Set the audio device buffer sampling rate
_ptrAudioBuffer->SetRecordingSampleRate(_samplingFreqIn * 1000);
_ptrAudioBuffer->SetRecordingSampleRate(_samplingFreqIn);
// the init rec function returns a fixed delay
_delayRecording = res / _samplingFreqIn;
@ -790,14 +784,7 @@ int32_t AudioRecordJni::SetRecordingSampleRate(const uint32_t samplesPerSec) {
}
// set the recording sample rate to use
if (samplesPerSec == 44100)
{
_samplingFreqIn = 44;
}
else
{
_samplingFreqIn = samplesPerSec / 1000;
}
_samplingFreqIn = samplesPerSec;
// Update the AudioDeviceBuffer
_ptrAudioBuffer->SetRecordingSampleRate(samplesPerSec);
@ -997,11 +984,7 @@ int32_t AudioRecordJni::InitSampleRate() {
if (_samplingFreqIn > 0)
{
// read the configured sampling rate
samplingFreq = 44100;
if (_samplingFreqIn != 44)
{
samplingFreq = _samplingFreqIn * 1000;
}
samplingFreq = _samplingFreqIn;
WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
" Trying configured recording sampling rate %d",
samplingFreq);
@ -1042,14 +1025,7 @@ int32_t AudioRecordJni::InitSampleRate() {
}
// set the recording sample rate to use
if (samplingFreq == 44100)
{
_samplingFreqIn = 44;
}
else
{
_samplingFreqIn = samplingFreq / 1000;
}
_samplingFreqIn = samplingFreq;
WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
"Recording sample rate set to (%d)", _samplingFreqIn);
@ -1141,7 +1117,7 @@ bool AudioRecordJni::RecThreadProcess()
if (_recording)
{
uint32_t samplesToRec = _samplingFreqIn * 10;
uint32_t samplesToRec = _samplingFreqIn / 100;
// Call java sc object method to record data to direct buffer
// Will block until data has been recorded (see java sc class),
@ -1158,7 +1134,7 @@ bool AudioRecordJni::RecThreadProcess()
}
else
{
_delayRecording = recDelayInSamples / _samplingFreqIn;
_delayRecording = (recDelayInSamples * 1000) / _samplingFreqIn;
}
Lock();
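
The surrounding changes switch _samplingFreqIn from kHz to Hz, so the derived quantities are rescaled: a 10 ms capture chunk is now rate/100 samples, and the sample-count delay converts to milliseconds as

  \mathrm{delayRecording\ [ms]} = \frac{\mathrm{recDelayInSamples} \times 1000}{\mathrm{\_samplingFreqIn}\ [\text{Hz}]},
  \qquad \text{e.g. } \frac{441 \times 1000}{44\,100} = 10\ \text{ms}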

View File

@ -117,7 +117,7 @@ AudioTrackJni::AudioTrackJni(const int32_t id)
_playWarning(0),
_playError(0),
_delayPlayout(0),
_samplingFreqOut((N_PLAY_SAMPLES_PER_SEC/1000)),
_samplingFreqOut((N_PLAY_SAMPLES_PER_SEC)),
_maxSpeakerVolume(0) {
}
@ -421,16 +421,10 @@ int32_t AudioTrackJni::InitPlayout() {
jmethodID initPlaybackID = env->GetMethodID(_javaScClass, "InitPlayback",
"(I)I");
int samplingFreq = 44100;
if (_samplingFreqOut != 44)
{
samplingFreq = _samplingFreqOut * 1000;
}
int retVal = -1;
// Call java sc object method
jint res = env->CallIntMethod(_javaScObj, initPlaybackID, samplingFreq);
jint res = env->CallIntMethod(_javaScObj, initPlaybackID, _samplingFreqOut);
if (res < 0)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
@ -439,7 +433,7 @@ int32_t AudioTrackJni::InitPlayout() {
else
{
// Set the audio device buffer sampling rate
_ptrAudioBuffer->SetPlayoutSampleRate(_samplingFreqOut * 1000);
_ptrAudioBuffer->SetPlayoutSampleRate(_samplingFreqOut);
_playIsInitialized = true;
retVal = 0;
}
@ -869,14 +863,7 @@ int32_t AudioTrackJni::SetPlayoutSampleRate(const uint32_t samplesPerSec) {
}
// set the playout sample rate to use
if (samplesPerSec == 44100)
{
_samplingFreqOut = 44;
}
else
{
_samplingFreqOut = samplesPerSec / 1000;
}
_samplingFreqOut = samplesPerSec;
// Update the AudioDeviceBuffer
_ptrAudioBuffer->SetPlayoutSampleRate(samplesPerSec);
@ -1162,11 +1149,7 @@ int32_t AudioTrackJni::InitSampleRate() {
if (_samplingFreqOut > 0)
{
// read the configured sampling rate
samplingFreq = 44100;
if (_samplingFreqOut != 44)
{
samplingFreq = _samplingFreqOut * 1000;
}
samplingFreq = _samplingFreqOut;
WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
" Trying configured playback sampling rate %d",
samplingFreq);
@ -1220,14 +1203,7 @@ int32_t AudioTrackJni::InitSampleRate() {
}
// set the playback sample rate to use
if (samplingFreq == 44100)
{
_samplingFreqOut = 44;
}
else
{
_samplingFreqOut = samplingFreq / 1000;
}
_samplingFreqOut = samplingFreq;
WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
"Playback sample rate set to (%d)", _samplingFreqOut);
@ -1366,7 +1342,7 @@ bool AudioTrackJni::PlayThreadProcess()
else if (res > 0)
{
// we are not recording and have got a delay value from playback
_delayPlayout = res / _samplingFreqOut;
_delayPlayout = (res * 1000) / _samplingFreqOut;
}
Lock();

View File

@ -16,7 +16,14 @@ package org.webrtc.voiceengine;
import android.content.Context;
import android.content.pm.PackageManager;
import android.media.AudioManager;
import android.util.Log;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import org.mozilla.gecko.mozglue.WebRTCJNITarget;
@WebRTCJNITarget
class AudioManagerAndroid {
// Most of Google lead devices use 44.1K as the default sampling rate, 44.1K
// is also widely used on other android devices.

View File

@ -11,6 +11,7 @@
#include "webrtc/modules/audio_device/android/opensles_input.h"
#include <assert.h>
#include <dlfcn.h>
#include "webrtc/modules/audio_device/android/audio_common.h"
#include "webrtc/modules/audio_device/android/opensles_common.h"
@ -63,7 +64,8 @@ OpenSlesInput::OpenSlesInput(
active_queue_(0),
rec_sampling_rate_(0),
agc_enabled_(false),
recording_delay_(0) {
recording_delay_(0),
opensles_lib_(NULL) {
}
OpenSlesInput::~OpenSlesInput() {
@ -81,15 +83,41 @@ void OpenSlesInput::ClearAndroidAudioDeviceObjects() {
int32_t OpenSlesInput::Init() {
assert(!initialized_);
/* Try to dynamically open the OpenSLES library */
opensles_lib_ = dlopen("libOpenSLES.so", RTLD_LAZY);
if (!opensles_lib_) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,
" failed to dlopen OpenSLES library");
return -1;
}
f_slCreateEngine = (slCreateEngine_t)dlsym(opensles_lib_, "slCreateEngine");
SL_IID_ENGINE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ENGINE");
SL_IID_BUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_BUFFERQUEUE");
SL_IID_ANDROIDCONFIGURATION_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDCONFIGURATION");
SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDSIMPLEBUFFERQUEUE");
SL_IID_RECORD_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_RECORD");
if (!f_slCreateEngine ||
!SL_IID_ENGINE_ ||
!SL_IID_BUFFERQUEUE_ ||
!SL_IID_ANDROIDCONFIGURATION_ ||
!SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ ||
!SL_IID_RECORD_) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,
" failed to find OpenSLES function");
return -1;
}
// Set up OpenSL engine.
OPENSL_RETURN_ON_FAILURE(slCreateEngine(&sles_engine_, 1, kOption, 0,
NULL, NULL),
OPENSL_RETURN_ON_FAILURE(f_slCreateEngine(&sles_engine_, 1, kOption, 0,
NULL, NULL),
-1);
OPENSL_RETURN_ON_FAILURE((*sles_engine_)->Realize(sles_engine_,
SL_BOOLEAN_FALSE),
-1);
OPENSL_RETURN_ON_FAILURE((*sles_engine_)->GetInterface(sles_engine_,
SL_IID_ENGINE,
SL_IID_ENGINE_,
&sles_engine_itf_),
-1);
@ -108,6 +136,7 @@ int32_t OpenSlesInput::Terminate() {
initialized_ = false;
mic_initialized_ = false;
rec_initialized_ = false;
dlclose(opensles_lib_);
return 0;
}
@ -234,6 +263,14 @@ int32_t OpenSlesInput::StereoRecordingIsAvailable(bool& available) { // NOLINT
return 0;
}
int32_t OpenSlesInput::SetStereoRecording(bool enable) { // NOLINT
if (enable) {
return -1;
} else {
return 0;
}
}
int32_t OpenSlesInput::StereoRecording(bool& enabled) const { // NOLINT
enabled = false;
return 0;
@ -277,8 +314,12 @@ void OpenSlesInput::UpdateRecordingDelay() {
}
void OpenSlesInput::UpdateSampleRate() {
#if !defined(WEBRTC_GONK)
rec_sampling_rate_ = audio_manager_.low_latency_supported() ?
audio_manager_.native_output_sample_rate() : kDefaultSampleRate;
#else
rec_sampling_rate_ = kDefaultSampleRate;
#endif
}
void OpenSlesInput::CalculateNumFifoBuffersNeeded() {
@ -352,7 +393,7 @@ bool OpenSlesInput::CreateAudioRecorder() {
// Note the interfaces still need to be initialized. This only tells OpenSl
// that the interfaces will be needed at some point.
const SLInterfaceID id[kNumInterfaces] = {
SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION };
SL_IID_ANDROIDSIMPLEBUFFERQUEUE_, SL_IID_ANDROIDCONFIGURATION_ };
const SLboolean req[kNumInterfaces] = {
SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
OPENSL_RETURN_ON_FAILURE(
@ -370,13 +411,13 @@ bool OpenSlesInput::CreateAudioRecorder() {
SL_BOOLEAN_FALSE),
false);
OPENSL_RETURN_ON_FAILURE(
(*sles_recorder_)->GetInterface(sles_recorder_, SL_IID_RECORD,
(*sles_recorder_)->GetInterface(sles_recorder_, SL_IID_RECORD_,
static_cast<void*>(&sles_recorder_itf_)),
false);
OPENSL_RETURN_ON_FAILURE(
(*sles_recorder_)->GetInterface(
sles_recorder_,
SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
SL_IID_ANDROIDSIMPLEBUFFERQUEUE_,
static_cast<void*>(&sles_recorder_sbq_itf_)),
false);
return true;
@ -520,7 +561,8 @@ bool OpenSlesInput::CbThreadImpl() {
while (fifo_->size() > 0 && recording_) {
int8_t* audio = fifo_->Pop();
audio_buffer_->SetRecordedBuffer(audio, buffer_size_samples());
audio_buffer_->SetVQEData(delay_provider_->PlayoutDelayMs(),
audio_buffer_->SetVQEData(delay_provider_ ?
delay_provider_->PlayoutDelayMs() : 0,
recording_delay_, 0);
audio_buffer_->DeliverRecordedData();
}
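
One subtlety in the binding above: dlsym() yields the address of the named symbol. For slCreateEngine that address is the function entry point, but the SL_IID_* symbols are global SLInterfaceID variables, so their values are read through an extra dereference. A sketch, using the slCreateEngine_t typedef the patch adds to the header:

void* lib = dlopen("libOpenSLES.so", RTLD_LAZY);
slCreateEngine_t create_engine =
    (slCreateEngine_t)dlsym(lib, "slCreateEngine");   // function address
SLInterfaceID iid_engine =
    *(SLInterfaceID*)dlsym(lib, "SL_IID_ENGINE");     // global variable's value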

View File

@ -15,7 +15,9 @@
#include <SLES/OpenSLES_Android.h>
#include <SLES/OpenSLES_AndroidConfiguration.h>
#if !defined(WEBRTC_GONK)
#include "webrtc/modules/audio_device/android/audio_manager_jni.h"
#endif
#include "webrtc/modules/audio_device/android/low_latency_event.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
@ -105,7 +107,7 @@ class OpenSlesInput {
// Stereo support
int32_t StereoRecordingIsAvailable(bool& available); // NOLINT
int32_t SetStereoRecording(bool enable) { return -1; }
int32_t SetStereoRecording(bool enable);
int32_t StereoRecording(bool& enabled) const; // NOLINT
// Delay information and control
@ -125,7 +127,7 @@ class OpenSlesInput {
// Keep as few OpenSL buffers as possible to avoid wasting memory. 2 is
// minimum for playout. Keep 2 for recording as well.
kNumOpenSlBuffers = 2,
kNum10MsToBuffer = 3,
kNum10MsToBuffer = 8,
};
int InitSampleRate();
@ -171,8 +173,10 @@ class OpenSlesInput {
// Thread-compatible.
bool CbThreadImpl();
#if !defined(WEBRTC_GONK)
// Java API handle
AudioManagerJni audio_manager_;
#endif
int id_;
PlayoutDelayProvider* delay_provider_;
@ -218,6 +222,21 @@ class OpenSlesInput {
// Audio status
uint16_t recording_delay_;
// dlopen for OpenSLES
void *opensles_lib_;
typedef SLresult (*slCreateEngine_t)(SLObjectItf *,
SLuint32,
const SLEngineOption *,
SLuint32,
const SLInterfaceID *,
const SLboolean *);
slCreateEngine_t f_slCreateEngine;
SLInterfaceID SL_IID_ENGINE_;
SLInterfaceID SL_IID_BUFFERQUEUE_;
SLInterfaceID SL_IID_ANDROIDCONFIGURATION_;
SLInterfaceID SL_IID_ANDROIDSIMPLEBUFFERQUEUE_;
SLInterfaceID SL_IID_RECORD_;
};
} // namespace webrtc

View File

@ -11,6 +11,7 @@
#include "webrtc/modules/audio_device/android/opensles_output.h"
#include <assert.h>
#include <dlfcn.h>
#include "webrtc/modules/audio_device/android/opensles_common.h"
#include "webrtc/modules/audio_device/android/fine_audio_buffer.h"
@ -63,7 +64,8 @@ OpenSlesOutput::OpenSlesOutput(const int32_t id)
speaker_sampling_rate_(kDefaultSampleRate),
buffer_size_samples_(0),
buffer_size_bytes_(0),
playout_delay_(0) {
playout_delay_(0),
opensles_lib_(NULL) {
}
OpenSlesOutput::~OpenSlesOutput() {
@ -83,15 +85,43 @@ void OpenSlesOutput::ClearAndroidAudioDeviceObjects() {
int32_t OpenSlesOutput::Init() {
assert(!initialized_);
/* Try to dynamically open the OpenSLES library */
opensles_lib_ = dlopen("libOpenSLES.so", RTLD_LAZY);
if (!opensles_lib_) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,
" failed to dlopen OpenSLES library");
return -1;
}
f_slCreateEngine = (slCreateEngine_t)dlsym(opensles_lib_, "slCreateEngine");
SL_IID_ENGINE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ENGINE");
SL_IID_BUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_BUFFERQUEUE");
SL_IID_ANDROIDCONFIGURATION_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDCONFIGURATION");
SL_IID_PLAY_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_PLAY");
SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_ANDROIDSIMPLEBUFFERQUEUE");
SL_IID_VOLUME_ = *(SLInterfaceID *)dlsym(opensles_lib_, "SL_IID_VOLUME");
if (!f_slCreateEngine ||
!SL_IID_ENGINE_ ||
!SL_IID_BUFFERQUEUE_ ||
!SL_IID_ANDROIDCONFIGURATION_ ||
!SL_IID_PLAY_ ||
!SL_IID_ANDROIDSIMPLEBUFFERQUEUE_ ||
!SL_IID_VOLUME_) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,
" failed to find OpenSLES function");
return -1;
}
// Set up OpenSl engine.
OPENSL_RETURN_ON_FAILURE(slCreateEngine(&sles_engine_, 1, kOption, 0,
NULL, NULL),
OPENSL_RETURN_ON_FAILURE(f_slCreateEngine(&sles_engine_, 1, kOption, 0,
NULL, NULL),
-1);
OPENSL_RETURN_ON_FAILURE((*sles_engine_)->Realize(sles_engine_,
SL_BOOLEAN_FALSE),
-1);
OPENSL_RETURN_ON_FAILURE((*sles_engine_)->GetInterface(sles_engine_,
SL_IID_ENGINE,
SL_IID_ENGINE_,
&sles_engine_itf_),
-1);
// Set up OpenSl output mix.
@ -123,6 +153,7 @@ int32_t OpenSlesOutput::Terminate() {
initialized_ = false;
speaker_initialized_ = false;
play_initialized_ = false;
dlclose(opensles_lib_);
return 0;
}
@ -311,6 +342,7 @@ void OpenSlesOutput::UpdatePlayoutDelay() {
}
bool OpenSlesOutput::SetLowLatency() {
#if !defined(WEBRTC_GONK)
if (!audio_manager_.low_latency_supported()) {
return false;
}
@ -319,6 +351,9 @@ bool OpenSlesOutput::SetLowLatency() {
speaker_sampling_rate_ = audio_manager_.native_output_sample_rate();
assert(speaker_sampling_rate_ > 0);
return true;
#else
return false;
#endif
}
void OpenSlesOutput::CalculateNumFifoBuffersNeeded() {
@ -404,7 +439,7 @@ bool OpenSlesOutput::CreateAudioPlayer() {
// Note the interfaces still need to be initialized. This only tells OpenSl
// that the interfaces will be needed at some point.
SLInterfaceID ids[kNumInterfaces] = {
SL_IID_BUFFERQUEUE, SL_IID_VOLUME, SL_IID_ANDROIDCONFIGURATION };
SL_IID_BUFFERQUEUE_, SL_IID_VOLUME_, SL_IID_ANDROIDCONFIGURATION_ };
SLboolean req[kNumInterfaces] = {
SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
OPENSL_RETURN_ON_FAILURE(
@ -417,11 +452,11 @@ bool OpenSlesOutput::CreateAudioPlayer() {
SL_BOOLEAN_FALSE),
false);
OPENSL_RETURN_ON_FAILURE(
(*sles_player_)->GetInterface(sles_player_, SL_IID_PLAY,
(*sles_player_)->GetInterface(sles_player_, SL_IID_PLAY_,
&sles_player_itf_),
false);
OPENSL_RETURN_ON_FAILURE(
(*sles_player_)->GetInterface(sles_player_, SL_IID_BUFFERQUEUE,
(*sles_player_)->GetInterface(sles_player_, SL_IID_BUFFERQUEUE_,
&sles_player_sbq_itf_),
false);
return true;

View File

@ -15,7 +15,9 @@
#include <SLES/OpenSLES_Android.h>
#include <SLES/OpenSLES_AndroidConfiguration.h>
#if !defined(WEBRTC_GONK)
#include "webrtc/modules/audio_device/android/audio_manager_jni.h"
#endif
#include "webrtc/modules/audio_device/android/low_latency_event.h"
#include "webrtc/modules/audio_device/android/audio_common.h"
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
@ -189,8 +191,10 @@ class OpenSlesOutput : public PlayoutDelayProvider {
// Thread-compatible.
bool CbThreadImpl();
#if !defined(WEBRTC_GONK)
// Java API handle
AudioManagerJni audio_manager_;
#endif
int id_;
bool initialized_;
@ -237,6 +241,22 @@ class OpenSlesOutput : public PlayoutDelayProvider {
// Audio status
uint16_t playout_delay_;
// dlopen for OpenSLES
void *opensles_lib_;
typedef SLresult (*slCreateEngine_t)(SLObjectItf *,
SLuint32,
const SLEngineOption *,
SLuint32,
const SLInterfaceID *,
const SLboolean *);
slCreateEngine_t f_slCreateEngine;
SLInterfaceID SL_IID_ENGINE_;
SLInterfaceID SL_IID_BUFFERQUEUE_;
SLInterfaceID SL_IID_ANDROIDCONFIGURATION_;
SLInterfaceID SL_IID_PLAY_;
SLInterfaceID SL_IID_ANDROIDSIMPLEBUFFERQUEUE_;
SLInterfaceID SL_IID_VOLUME_;
};
} // namespace webrtc

View File

@ -8,6 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#if defined(_MSC_VER)
#include <windows.h>
#endif
#include "webrtc/modules/audio_device/android/single_rw_fifo.h"
static int UpdatePos(int pos, int capacity) {
@ -18,7 +22,19 @@ namespace webrtc {
namespace subtle {
#if defined(__ARMEL__)
// Start with compiler support, then processor-specific hacks
#if defined(__GNUC__) || defined(__clang__)
// Available on GCC and clang - others?
inline void MemoryBarrier() {
__sync_synchronize();
}
#elif defined(_MSC_VER)
inline void MemoryBarrier() {
::MemoryBarrier();
}
#elif defined(__ARMEL__)
// From http://src.chromium.org/viewvc/chrome/trunk/src/base/atomicops_internals_arm_gcc.h
// Note that it is only the MemoryBarrier function that makes this class arm
// specific. Borrowing other MemoryBarrier implementations, this class could
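
For comparison only (not part of the patch): on a C++11 toolchain the same full barrier could be spelled portably with the standard fence:

#include <atomic>

inline void MemoryBarrier() {
  std::atomic_thread_fence(std::memory_order_seq_cst);
}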

View File

@ -47,11 +47,16 @@
'dummy/audio_device_utility_dummy.h',
],
'conditions': [
['OS=="linux"', {
['build_with_mozilla==1', {
'cflags_mozilla': [
'$(NSPR_CFLAGS)',
],
}],
['OS=="linux" or include_alsa_audio==1 or include_pulse_audio==1', {
'include_dirs': [
'linux',
],
}], # OS==linux
}], # OS=="linux" or include_alsa_audio==1 or include_pulse_audio==1
['OS=="ios"', {
'include_dirs': [
'ios',
@ -69,9 +74,24 @@
}],
['OS=="android"', {
'include_dirs': [
'/widget/android',
'android',
],
}], # OS==android
['moz_widget_toolkit_gonk==1', {
'cflags_mozilla': [
'-I$(ANDROID_SOURCE)/frameworks/wilhelm/include',
'-I$(ANDROID_SOURCE)/system/media/wilhelm/include',
],
'include_dirs': [
'android',
],
}], # moz_widget_toolkit_gonk==1
['enable_android_opensl==1', {
'include_dirs': [
'opensl',
],
}], # enable_android_opensl
['include_internal_audio_device==0', {
'defines': [
'WEBRTC_DUMMY_AUDIO_BUILD',
@ -79,14 +99,8 @@
}],
['include_internal_audio_device==1', {
'sources': [
'linux/alsasymboltable_linux.cc',
'linux/alsasymboltable_linux.h',
'linux/audio_device_alsa_linux.cc',
'linux/audio_device_alsa_linux.h',
'linux/audio_device_utility_linux.cc',
'linux/audio_device_utility_linux.h',
'linux/audio_mixer_manager_alsa_linux.cc',
'linux/audio_mixer_manager_alsa_linux.h',
'linux/latebindingsymboltable_linux.cc',
'linux/latebindingsymboltable_linux.h',
'ios/audio_device_ios.cc',
@ -110,60 +124,105 @@
'win/audio_device_utility_win.h',
'win/audio_mixer_manager_win.cc',
'win/audio_mixer_manager_win.h',
# used externally for getUserMedia
'opensl/single_rw_fifo.cc',
'opensl/single_rw_fifo.h',
'android/audio_device_template.h',
'android/audio_device_utility_android.cc',
'android/audio_device_utility_android.h',
'android/audio_manager_jni.cc',
'android/audio_manager_jni.h',
'android/audio_record_jni.cc',
'android/audio_record_jni.h',
'android/audio_track_jni.cc',
'android/audio_track_jni.h',
'android/fine_audio_buffer.cc',
'android/fine_audio_buffer.h',
'android/low_latency_event_posix.cc',
'android/low_latency_event.h',
'android/opensles_common.cc',
'android/opensles_common.h',
'android/opensles_input.cc',
'android/opensles_input.h',
'android/opensles_output.cc',
'android/opensles_output.h',
'android/single_rw_fifo.cc',
'android/single_rw_fifo.h',
],
'conditions': [
['OS=="android"', {
'sources': [
'opensl/audio_manager_jni.cc',
'opensl/audio_manager_jni.h',
'android/audio_device_jni_android.cc',
'android/audio_device_jni_android.h',
],
}],
['OS=="android" or moz_widget_toolkit_gonk==1', {
'link_settings': {
'libraries': [
'-llog',
'-lOpenSLES',
],
},
'conditions': [
['enable_android_opensl==1', {
'sources': [
'opensl/audio_device_opensles.cc',
'opensl/audio_device_opensles.h',
'opensl/fine_audio_buffer.cc',
'opensl/fine_audio_buffer.h',
'opensl/low_latency_event_posix.cc',
'opensl/low_latency_event.h',
'opensl/opensles_common.cc',
'opensl/opensles_common.h',
'opensl/opensles_input.cc',
'opensl/opensles_input.h',
'opensl/opensles_output.h',
'shared/audio_device_utility_shared.cc',
'shared/audio_device_utility_shared.h',
],
}, {
'sources': [
'shared/audio_device_utility_shared.cc',
'shared/audio_device_utility_shared.h',
'android/audio_device_jni_android.cc',
'android/audio_device_jni_android.h',
],
}],
['enable_android_opensl_output==1', {
'sources': [
'opensl/opensles_output.cc'
],
'defines': [
'WEBRTC_ANDROID_OPENSLES_OUTPUT',
],
}],
],
}],
['OS=="linux"', {
'defines': [
'LINUX_ALSA',
],
'link_settings': {
'libraries': [
'-ldl','-lX11',
],
},
'conditions': [
['include_pulse_audio==1', {
'defines': [
'LINUX_PULSE',
],
'sources': [
'linux/audio_device_pulse_linux.cc',
'linux/audio_device_pulse_linux.h',
'linux/audio_mixer_manager_pulse_linux.cc',
'linux/audio_mixer_manager_pulse_linux.h',
'linux/pulseaudiosymboltable_linux.cc',
'linux/pulseaudiosymboltable_linux.h',
],
}],
}],
['include_alsa_audio==1', {
'cflags_mozilla': [
'$(MOZ_ALSA_CFLAGS)',
],
'defines': [
'LINUX_ALSA',
],
'sources': [
'linux/alsasymboltable_linux.cc',
'linux/alsasymboltable_linux.h',
'linux/audio_device_alsa_linux.cc',
'linux/audio_device_alsa_linux.h',
'linux/audio_mixer_manager_alsa_linux.cc',
'linux/audio_mixer_manager_alsa_linux.h',
],
}],
['include_pulse_audio==1', {
'cflags_mozilla': [
'$(MOZ_PULSEAUDIO_CFLAGS)',
],
'defines': [
'LINUX_PULSE',
],
'sources': [
'linux/audio_device_pulse_linux.cc',
'linux/audio_device_pulse_linux.h',
'linux/audio_mixer_manager_pulse_linux.cc',
'linux/audio_mixer_manager_pulse_linux.h',
'linux/pulseaudiosymboltable_linux.cc',
'linux/pulseaudiosymboltable_linux.h',
],
}],
['OS=="mac" or OS=="ios"', {
@ -272,4 +331,3 @@
}], # include_tests
],
}

View File

@ -16,21 +16,28 @@
#include <assert.h>
#include <string.h>
#if defined(_WIN32)
#if defined(WEBRTC_DUMMY_AUDIO_BUILD)
// do not include platform specific headers
#elif defined(_WIN32)
#include "audio_device_utility_win.h"
#include "audio_device_wave_win.h"
#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
#include "audio_device_core_win.h"
#endif
#elif defined(WEBRTC_ANDROID)
#elif defined(WEBRTC_ANDROID_OPENSLES)
// ANDROID and GONK
#include <stdlib.h>
#include <dlfcn.h>
#include "audio_device_utility_android.h"
#include "webrtc/modules/audio_device/android/audio_device_template.h"
#if !defined(WEBRTC_GONK)
// GONK only supports opensles; android can use that or jni
#include "webrtc/modules/audio_device/android/audio_record_jni.h"
#include "webrtc/modules/audio_device/android/audio_track_jni.h"
#endif
#include "webrtc/modules/audio_device/android/opensles_input.h"
#include "webrtc/modules/audio_device/android/opensles_output.h"
#elif defined(WEBRTC_LINUX)
#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
#include "audio_device_utility_linux.h"
#if defined(LINUX_ALSA)
#include "audio_device_alsa_linux.h"
@ -159,7 +166,7 @@ int32_t AudioDeviceModuleImpl::CheckPlatform()
#elif defined(WEBRTC_ANDROID)
platform = kPlatformAndroid;
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "current platform is ANDROID");
#elif defined(WEBRTC_LINUX)
#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
platform = kPlatformLinux;
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "current platform is LINUX");
#elif defined(WEBRTC_IOS)
@ -258,17 +265,38 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects()
// Create the *Android OpenSLES* implementation of the Audio Device
//
#if defined(WEBRTC_ANDROID)
#if defined(WEBRTC_ANDROID) || defined (WEBRTC_GONK)
if (audioLayer == kPlatformDefaultAudio)
{
// AudioRecordJni provides hardware AEC and OpenSlesOutput low latency.
#if defined(WEBRTC_ANDROID_OPENSLES)
ptrAudioDevice = new AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>(Id());
#else
ptrAudioDevice = new AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>(Id());
// AudioRecordJni provides hardware AEC and OpenSlesOutput low latency.
#if defined (WEBRTC_ANDROID_OPENSLES)
// Android and Gonk
// Check if the OpenSLES library is available before going further.
void* opensles_lib = dlopen("libOpenSLES.so", RTLD_LAZY);
if (opensles_lib) {
// That worked, close for now and proceed normally.
dlclose(opensles_lib);
if (audioLayer == kPlatformDefaultAudio)
{
// Create *Android OpenSLES Audio* implementation
ptrAudioDevice = new AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>(Id());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"Android OpenSLES Audio APIs will be utilized");
}
}
#endif
#if !defined(WEBRTC_GONK)
// Fall back to this case if on Android 2.2/OpenSLES not available.
if (ptrAudioDevice == NULL) {
// Create the *Android Java* implementation of the Audio Device
if (audioLayer == kPlatformDefaultAudio)
{
// Create *Android JNI Audio* implementation
ptrAudioDevice = new AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>(Id());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Android JNI Audio APIs will be utilized");
}
}
#endif
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"Android OpenSLES Audio APIs will be utilized");
}
if (ptrAudioDevice != NULL)
@ -276,11 +304,11 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects()
// Create the Android implementation of the Device Utility.
ptrAudioDeviceUtility = new AudioDeviceUtilityAndroid(Id());
}
// END #if defined(WEBRTC_ANDROID)
// END #if defined(WEBRTC_ANDROID_OPENSLES)
// Create the *Linux* implementation of the Audio Device
//
#elif defined(WEBRTC_LINUX)
#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
if ((audioLayer == kLinuxPulseAudio) || (audioLayer == kPlatformDefaultAudio))
{
#if defined(LINUX_PULSE)
@ -328,7 +356,7 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects()
//
ptrAudioDeviceUtility = new AudioDeviceUtilityLinux(Id());
}
#endif // #if defined(WEBRTC_LINUX)
#endif // #if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
// Create the *iPhone* implementation of the Audio Device
//
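The availability check above reduces to this pattern (illustrative helper, not in the patch): dlopen() only succeeds where the device actually ships libOpenSLES.so, so the probe handle is closed immediately and OpenSlesInput/Output::Init() reopen the library for real use:

#include <dlfcn.h>

static bool OpenSlesAvailable() {
  if (void* lib = dlopen("libOpenSLES.so", RTLD_LAZY)) {
    dlclose(lib);  // probe only; Init() takes its own handle
    return true;
  }
  return false;
}
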

View File

@ -46,7 +46,7 @@ bool AudioDeviceUtility::StringCompare(
} // namespace webrtc
#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
// ============================================================================
// Linux & Mac
@ -109,4 +109,4 @@ bool AudioDeviceUtility::StringCompare(
} // namespace webrtc
#endif // defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
#endif // defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)

View File

@ -1332,7 +1332,7 @@ int32_t AudioDeviceIPhone::InitPlayOrRecord() {
// todo: Add 48 kHz (increase buffer sizes). Other fs?
if ((playoutDesc.mSampleRate > 44090.0)
&& (playoutDesc.mSampleRate < 44110.0)) {
_adbSampFreq = 44000;
_adbSampFreq = 44100;
} else if ((playoutDesc.mSampleRate > 15990.0)
&& (playoutDesc.mSampleRate < 16010.0)) {
_adbSampFreq = 16000;

View File

@ -19,8 +19,8 @@
namespace webrtc {
class ThreadWrapper;
const uint32_t N_REC_SAMPLES_PER_SEC = 44000;
const uint32_t N_PLAY_SAMPLES_PER_SEC = 44000;
const uint32_t N_REC_SAMPLES_PER_SEC = 44100;
const uint32_t N_PLAY_SAMPLES_PER_SEC = 44100;
const uint32_t N_REC_CHANNELS = 1; // default is mono recording
const uint32_t N_PLAY_CHANNELS = 1; // default is mono playout

View File

@ -19,6 +19,13 @@
#include "webrtc/system_wrappers/interface/thread_wrapper.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "Latency.h"
#define LOG_FIRST_CAPTURE(x) LogTime(AsyncLatencyLogger::AudioCaptureBase, \
reinterpret_cast<uint64_t>(x), 0)
#define LOG_CAPTURE_FRAMES(x, frames) LogLatency(AsyncLatencyLogger::AudioCapture, \
reinterpret_cast<uint64_t>(x), frames)
webrtc_adm_linux_alsa::AlsaSymbolTable AlsaSymbolTable;
// Accesses ALSA functions through our late-binding symbol table instead of
@ -95,6 +102,7 @@ AudioDeviceLinuxALSA::AudioDeviceLinuxALSA(const int32_t id) :
_playBufType(AudioDeviceModule::kFixedBufferSize),
_initialized(false),
_recording(false),
_firstRecord(true),
_playing(false),
_recIsInitialized(false),
_playIsInitialized(false),
@ -181,6 +189,7 @@ int32_t AudioDeviceLinuxALSA::Init()
return 0;
}
#ifdef USE_X11
//Get X display handle for typing detection
_XDisplay = XOpenDisplay(NULL);
if (!_XDisplay)
@ -188,6 +197,7 @@ int32_t AudioDeviceLinuxALSA::Init()
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
" failed to open X display, typing detection will not work");
}
#endif
_playWarning = 0;
_playError = 0;
@ -255,11 +265,13 @@ int32_t AudioDeviceLinuxALSA::Terminate()
_critSect.Enter();
}
#ifdef USE_X11
if (_XDisplay)
{
XCloseDisplay(_XDisplay);
_XDisplay = NULL;
}
#endif
_initialized = false;
_outputDeviceIsSpecified = false;
@ -985,7 +997,8 @@ int32_t AudioDeviceLinuxALSA::RecordingDeviceName(
memset(guid, 0, kAdmMaxGuidSize);
}
return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize);
return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize,
guid, kAdmMaxGuidSize);
}
int16_t AudioDeviceLinuxALSA::RecordingDevices()
@ -1447,6 +1460,7 @@ int32_t AudioDeviceLinuxALSA::StartRecording()
}
// RECORDING
const char* threadName = "webrtc_audio_module_capture_thread";
_firstRecord = true;
_ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc,
this,
kRealtimePriority,
@ -1633,6 +1647,17 @@ int32_t AudioDeviceLinuxALSA::StartPlayout()
return -1;
}
int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
if (errVal < 0)
{
WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
" playout snd_pcm_prepare failed (%s)\n",
LATE(snd_strerror)(errVal));
// just log the error and continue; a failed snd_pcm_open
// would already have made us return -1
}
unsigned int threadID(0);
if (!_ptrThreadPlay->Start(threadID))
{
@ -1647,16 +1672,6 @@ int32_t AudioDeviceLinuxALSA::StartPlayout()
}
_playThreadID = threadID;
int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
if (errVal < 0)
{
WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
" playout snd_pcm_prepare failed (%s)\n",
LATE(snd_strerror)(errVal));
// just log error
// if snd_pcm_open fails will return -1
}
return 0;
}
@ -1836,7 +1851,9 @@ int32_t AudioDeviceLinuxALSA::GetDevicesInfo(
const bool playback,
const int32_t enumDeviceNo,
char* enumDeviceName,
const int32_t ednLen) const
const int32_t ednLen,
char* enumDeviceId,
const int32_t ediLen) const
{
// Device enumeration based on libjingle implementation
@ -1875,6 +1892,8 @@ int32_t AudioDeviceLinuxALSA::GetDevicesInfo(
function == FUNC_GET_DEVICE_NAME_FOR_AN_ENUM) && enumDeviceNo == 0)
{
strcpy(enumDeviceName, "default");
if (enumDeviceId)
memset(enumDeviceId, 0, ediLen);
err = LATE(snd_device_name_free_hint)(hints);
if (err != 0)
@ -1937,6 +1956,11 @@ int32_t AudioDeviceLinuxALSA::GetDevicesInfo(
// We have found the enum device, copy the name to buffer.
strncpy(enumDeviceName, desc, ednLen);
enumDeviceName[ednLen-1] = '\0';
if (enumDeviceId)
{
strncpy(enumDeviceId, name, ediLen);
enumDeviceId[ediLen-1] = '\0';
}
keepSearching = false;
// Replace '\n' with '-'.
char * pret = strchr(enumDeviceName, '\n'/*0xa*/); //LF
@ -1949,6 +1973,11 @@ int32_t AudioDeviceLinuxALSA::GetDevicesInfo(
// We have found the enum device, copy the name to buffer.
strncpy(enumDeviceName, name, ednLen);
enumDeviceName[ednLen-1] = '\0';
if (enumDeviceId)
{
strncpy(enumDeviceId, name, ediLen);
enumDeviceId[ediLen-1] = '\0';
}
keepSearching = false;
}
@ -1973,7 +2002,7 @@ int32_t AudioDeviceLinuxALSA::GetDevicesInfo(
LATE(snd_strerror)(err));
// Continue and return true anyway, since we did get the whole list.
}
}
}
if (FUNC_GET_NUM_OF_DEVICE == function)
{
@ -2258,6 +2287,11 @@ bool AudioDeviceLinuxALSA::RecThreadProcess()
{ // buf is full
_recordingFramesLeft = _recordingFramesIn10MS;
if (_firstRecord) {
LOG_FIRST_CAPTURE(this);
_firstRecord = false;
}
LOG_CAPTURE_FRAMES(this, _recordingFramesIn10MS);
// store the recorded buffer (no action will be taken if the
// #recorded samples is not a full buffer)
_ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
@ -2342,7 +2376,7 @@ bool AudioDeviceLinuxALSA::RecThreadProcess()
bool AudioDeviceLinuxALSA::KeyPressed() const{
#ifdef USE_X11
char szKey[32];
unsigned int i = 0;
char state = 0;
@ -2360,5 +2394,8 @@ bool AudioDeviceLinuxALSA::KeyPressed() const{
// Save old state
memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
return (state != 0);
#else
return false;
#endif
}
} // namespace webrtc
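
For reference, a minimal standalone sketch of the X11 keymap polling that the USE_X11 path above performs (function and variable names invented; the real code keeps the previous bitmap in _oldKeyState):

#include <X11/Xlib.h>
#include <string.h>

static char old_key_state[32];

bool AnyNewKeyDown(Display* display) {
  char key_state[32];
  XQueryKeymap(display, key_state);  // 256-key bitmap, one bit per keycode
  char changed_and_down = 0;
  for (unsigned i = 0; i < sizeof(key_state); ++i) {
    // Keep bits that flipped since the last poll AND are currently down.
    changed_and_down |= (key_state[i] ^ old_key_state[i]) & key_state[i];
  }
  memcpy(old_key_state, key_state, sizeof(old_key_state));
  return changed_and_down != 0;
}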

View File

@ -16,7 +16,9 @@
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#ifdef USE_X11
#include <X11/Xlib.h>
#endif
#include <alsa/asoundlib.h>
#include <sys/ioctl.h>
#include <sys/soundcard.h>
@ -167,7 +169,9 @@ private:
const bool playback,
const int32_t enumDeviceNo = 0,
char* enumDeviceName = NULL,
const int32_t ednLen = 0) const;
const int32_t ednLen = 0,
char* enumDeviceID = NULL,
const int32_t ediLen = 0) const;
int32_t ErrorRecovery(int32_t error, snd_pcm_t* deviceHandle);
private:
@ -233,6 +237,7 @@ private:
private:
bool _initialized;
bool _recording;
bool _firstRecord;
bool _playing;
bool _recIsInitialized;
bool _playIsInitialized;
@ -250,7 +255,9 @@ private:
uint16_t _playBufDelayFixed; // fixed playback delay
char _oldKeyState[32];
#ifdef USE_X11
Display* _XDisplay;
#endif
};
}

View File

@ -202,6 +202,7 @@ int32_t AudioDeviceLinuxPulse::Init()
_recWarning = 0;
_recError = 0;
#ifdef USE_X11
// Get X display handle for typing detection
_XDisplay = XOpenDisplay(NULL);
if (!_XDisplay)
@ -209,6 +210,7 @@ int32_t AudioDeviceLinuxPulse::Init()
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
" failed to open X display, typing detection will not work");
}
#endif
// RECORDING
const char* threadName = "webrtc_audio_module_rec_thread";
@ -323,11 +325,13 @@ int32_t AudioDeviceLinuxPulse::Terminate()
return -1;
}
#ifdef USE_X11
if (_XDisplay)
{
XCloseDisplay(_XDisplay);
_XDisplay = NULL;
}
#endif
_initialized = false;
_outputDeviceIsSpecified = false;
@ -3081,7 +3085,7 @@ bool AudioDeviceLinuxPulse::RecThreadProcess()
}
bool AudioDeviceLinuxPulse::KeyPressed() const{
#ifdef USE_X11
char szKey[32];
unsigned int i = 0;
char state = 0;
@ -3099,5 +3103,8 @@ bool AudioDeviceLinuxPulse::KeyPressed() const{
// Save old state
memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
return (state != 0);
#else
return false;
#endif
}
}

View File

@ -15,7 +15,9 @@
#include "webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#ifdef USE_X11
#include <X11/Xlib.h>
#endif
#include <pulse/pulseaudio.h>
// We define this flag if it's missing from our headers, because we want to be
@ -375,7 +377,9 @@ private:
pa_buffer_attr _recBufferAttr;
char _oldKeyState[32];
#ifdef USE_X11
Display* _XDisplay;
#endif
};
}

View File

@ -27,7 +27,7 @@
#include "webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h"
#ifdef WEBRTC_LINUX
#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
#include <dlfcn.h>
#endif
@ -37,8 +37,8 @@ using namespace webrtc;
namespace webrtc_adm_linux {
inline static const char *GetDllError() {
#ifdef WEBRTC_LINUX
char *err = dlerror();
#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
const char *err = dlerror();
if (err) {
return err;
} else {
@ -50,7 +50,7 @@ inline static const char *GetDllError() {
}
DllHandle InternalLoadDll(const char dll_name[]) {
#ifdef WEBRTC_LINUX
#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
DllHandle handle = dlopen(dll_name, RTLD_NOW);
#else
#error Not implemented
@ -63,7 +63,7 @@ DllHandle InternalLoadDll(const char dll_name[]) {
}
void InternalUnloadDll(DllHandle handle) {
#ifdef WEBRTC_LINUX
#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
if (dlclose(handle) != 0) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
"%s", GetDllError());
@ -76,9 +76,9 @@ void InternalUnloadDll(DllHandle handle) {
static bool LoadSymbol(DllHandle handle,
const char *symbol_name,
void **symbol) {
#ifdef WEBRTC_LINUX
#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
*symbol = dlsym(handle, symbol_name);
char *err = dlerror();
const char *err = dlerror();
if (err) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
"Error loading symbol %s : %d", symbol_name, err);
@ -101,7 +101,7 @@ bool InternalLoadSymbols(DllHandle handle,
int num_symbols,
const char *const symbol_names[],
void *symbols[]) {
#ifdef WEBRTC_LINUX
#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
// Clear any old errors.
dlerror();
#endif
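
The late-binding table resolves symbols at runtime; a self-contained sketch of the same dlopen/dlerror/dlsym discipline (library and symbol picked purely for illustration):

#include <dlfcn.h>
#include <stdio.h>

int main() {
  void* handle = dlopen("libpulse.so.0", RTLD_NOW);
  if (!handle) {
    fprintf(stderr, "dlopen: %s\n", dlerror());
    return 1;
  }
  dlerror();  // Clear stale error state; dlsym() may legally return NULL.
  void* sym = dlsym(handle, "pa_context_new");
  const char* err = dlerror();
  if (err) {
    fprintf(stderr, "dlsym: %s\n", err);
    dlclose(handle);
    return 1;
  }
  printf("resolved pa_context_new at %p\n", sym);
  dlclose(handle);
  return 0;
}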

View File

@ -42,7 +42,7 @@
namespace webrtc_adm_linux {
#ifdef WEBRTC_LINUX
#if defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
typedef void *DllHandle;
const DllHandle kInvalidDllHandle = NULL;

View File

@ -29,7 +29,11 @@
namespace webrtc_adm_linux_pulse {
#if defined(__OpenBSD__) || defined(WEBRTC_GONK)
LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(PulseAudioSymbolTable, "libpulse.so")
#else
LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(PulseAudioSymbolTable, "libpulse.so.0")
#endif
#define X(sym) \
LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(PulseAudioSymbolTable, sym)
PULSE_AUDIO_SYMBOLS_LIST

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/audio_device_opensles_android.cc"

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/audio_device_opensles_android.h"

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/audio_manager_jni.cc"

View File

@ -0,0 +1,6 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/audio_manager_jni.h"

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/fine_audio_buffer.cc"

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/fine_audio_buffer.h"

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/low_latency_event_posix.cc"

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/low_latency_event_posix.h"

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/opensles_common.cc"

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/opensles_common.h"

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/opensles_input.cc"

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/opensles_input.h"

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/opensles_output.cc"

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/opensles_output.h"

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/single_rw_fifo.cc"

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/single_rw_fifo.h"

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/audio_device_jni_android.cc"

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/audio_device_jni_android.h"

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/audio_device_utility_android.cc"

View File

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../android/audio_device_utility_android.h"

View File

@ -214,7 +214,7 @@ class AudioDeviceAPITest: public testing::Test {
// Create default implementation instance
EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
kId, AudioDeviceModule::kPlatformDefaultAudio)) != NULL);
#elif defined(WEBRTC_LINUX)
#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
kId, AudioDeviceModule::kWindowsWaveAudio)) == NULL);
EXPECT_TRUE((audio_device_ = AudioDeviceModuleImpl::Create(
@ -1707,7 +1707,7 @@ TEST_F(AudioDeviceAPITest, CPULoad) {
// TODO(kjellander): Fix flakiness causing failures on Windows.
// TODO(phoglund): Fix flakiness causing failures on Linux.
#if !defined(_WIN32) && !defined(WEBRTC_LINUX)
#if !defined(_WIN32) && !defined(WEBRTC_LINUX) && !defined(WEBRTC_BSD)
TEST_F(AudioDeviceAPITest, StartAndStopRawOutputFileRecording) {
// NOTE: this API is better tested in a functional test
CheckInitialPlayoutStates();
@ -1776,7 +1776,7 @@ TEST_F(AudioDeviceAPITest, StartAndStopRawInputFileRecording) {
// - size of raw_input_not_recording.pcm shall be 0
// - size of raw_input_not_recording.pcm shall be > 0
}
#endif // !WIN32 && !WEBRTC_LINUX
#endif // !WIN32 && !WEBRTC_LINUX && !defined(WEBRTC_BSD)
TEST_F(AudioDeviceAPITest, RecordingSampleRate) {
uint32_t sampleRate(0);
@ -1787,10 +1787,10 @@ TEST_F(AudioDeviceAPITest, RecordingSampleRate) {
EXPECT_EQ(48000, sampleRate);
#elif defined(ANDROID)
TEST_LOG("Recording sample rate is %u\n\n", sampleRate);
EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000));
EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000));
#elif defined(WEBRTC_IOS)
TEST_LOG("Recording sample rate is %u\n\n", sampleRate);
EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000) ||
EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000) ||
(sampleRate == 8000));
#endif
@ -1806,10 +1806,10 @@ TEST_F(AudioDeviceAPITest, PlayoutSampleRate) {
EXPECT_EQ(48000, sampleRate);
#elif defined(ANDROID)
TEST_LOG("Playout sample rate is %u\n\n", sampleRate);
EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000));
EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000));
#elif defined(WEBRTC_IOS)
TEST_LOG("Playout sample rate is %u\n\n", sampleRate);
EXPECT_TRUE((sampleRate == 44000) || (sampleRate == 16000) ||
EXPECT_TRUE((sampleRate == 44100) || (sampleRate == 16000) ||
(sampleRate == 8000));
#endif
}

View File

@ -321,12 +321,6 @@ int32_t AudioTransportImpl::NeedMorePlayData(
int32_t fsInHz(samplesPerSecIn);
int32_t fsOutHz(samplesPerSec);
if (fsInHz == 44100)
fsInHz = 44000;
if (fsOutHz == 44100)
fsOutHz = 44000;
if (nChannelsIn == 2 && nBytesPerSampleIn == 4)
{
// input is stereo => we will resample in stereo
@ -1236,7 +1230,7 @@ int32_t FuncTestManager::TestAudioTransport()
if (samplesPerSec == 48000) {
_audioTransport->SetFilePlayout(
true, GetResource(_playoutFile48.c_str()));
} else if (samplesPerSec == 44100 || samplesPerSec == 44000) {
} else if (samplesPerSec == 44100) {
_audioTransport->SetFilePlayout(
true, GetResource(_playoutFile44.c_str()));
} else if (samplesPerSec == 16000) {
@ -1469,7 +1463,7 @@ int32_t FuncTestManager::TestSpeakerVolume()
if (48000 == samplesPerSec) {
_audioTransport->SetFilePlayout(
true, GetResource(_playoutFile48.c_str()));
} else if (44100 == samplesPerSec || samplesPerSec == 44000) {
} else if (44100 == samplesPerSec) {
_audioTransport->SetFilePlayout(
true, GetResource(_playoutFile44.c_str()));
} else if (samplesPerSec == 16000) {
@ -1570,7 +1564,7 @@ int32_t FuncTestManager::TestSpeakerMute()
EXPECT_EQ(0, audioDevice->PlayoutSampleRate(&samplesPerSec));
if (48000 == samplesPerSec)
_audioTransport->SetFilePlayout(true, _playoutFile48.c_str());
else if (44100 == samplesPerSec || 44000 == samplesPerSec)
else if (44100 == samplesPerSec)
_audioTransport->SetFilePlayout(true, _playoutFile44.c_str());
else
{

View File

@ -1710,3 +1710,4 @@ static void TimeToFrequency(float time_data[PART_LEN2],
freq_data[1][i] = time_data[2 * i + 1];
}
}

View File

@ -429,3 +429,4 @@ void WebRtcAec_InitAec_SSE2(void) {
WebRtcAec_FilterAdaptation = FilterAdaptationSSE2;
WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppressSSE2;
}

View File

@ -174,6 +174,7 @@
'aec/aec_rdft_sse2.c',
],
'cflags': ['-msse2',],
'cflags_mozilla': [ '-msse2', ],
'xcode_settings': {
'OTHER_CFLAGS': ['-msse2',],
},
@ -197,14 +198,17 @@
'dependencies': [
'<(gen_core_neon_offsets_gyp):*',
],
'sources': [
#
# We disable the ASM source, because our gyp->Makefile translator
# does not support the build steps to get the asm offsets.
'sources!': [
'aecm/aecm_core_neon.S',
'ns/nsx_core_neon.S',
],
'include_dirs': [
'<(shared_generated_dir)',
],
'sources!': [
'sources': [
'aecm/aecm_core_neon.c',
'ns/nsx_core_neon.c',
],

View File

@ -72,7 +72,7 @@ EchoCancellationImpl::EchoCancellationImpl(const AudioProcessingImpl* apm)
was_stream_drift_set_(false),
stream_has_echo_(false),
delay_logging_enabled_(false),
delay_correction_enabled_(false) {}
delay_correction_enabled_(true) {} // default to long AEC tail in Mozilla
EchoCancellationImpl::~EchoCancellationImpl() {}
@ -339,10 +339,12 @@ int EchoCancellationImpl::Initialize() {
return apm_->kNoError;
}
#if 0
void EchoCancellationImpl::SetExtraOptions(const Config& config) {
delay_correction_enabled_ = config.Get<DelayCorrection>().enabled;
Configure();
}
#endif
void* EchoCancellationImpl::CreateHandle() const {
Handle* handle = NULL;

View File

@ -34,7 +34,7 @@ class EchoCancellationImpl : public EchoCancellationImplWrapper {
// ProcessingComponent implementation.
virtual int Initialize() OVERRIDE;
virtual void SetExtraOptions(const Config& config) OVERRIDE;
// virtual void SetExtraOptions(const Config& config) OVERRIDE;
private:
// EchoCancellation implementation.

View File

@ -96,14 +96,22 @@ struct RTPVideoHeaderVP8 {
bool beginningOfPartition; // True if this packet is the first
// in a VP8 partition. Otherwise false
};
struct RTPVideoHeaderH264 {
uint8_t nalu_header;
bool single_nalu;
};
union RTPVideoTypeHeader {
RTPVideoHeaderVP8 VP8;
RTPVideoHeaderH264 H264;
};
enum RtpVideoCodecTypes {
kRtpVideoNone,
kRtpVideoGeneric,
kRtpVideoVp8
kRtpVideoVp8,
kRtpVideoH264
};
struct RTPVideoHeader {
uint16_t width; // size
@ -897,6 +905,11 @@ inline bool IsNewerTimestamp(uint32_t timestamp, uint32_t prev_timestamp) {
static_cast<uint32_t>(timestamp - prev_timestamp) < 0x80000000;
}
inline bool IsNewerOrSameTimestamp(uint32_t timestamp, uint32_t prev_timestamp) {
return timestamp == prev_timestamp ||
static_cast<uint32_t>(timestamp - prev_timestamp) < 0x80000000;
}
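// Worked example of the wraparound-safe test above (values invented):
// with prev_timestamp = 0xFFFFFFF0 (just before the 32-bit wrap) and
// timestamp = 0x00000005 (just after it), timestamp - prev_timestamp
// wraps to 0x15 (21), which is < 0x80000000, so the newer-than test
// holds even though the raw value is numerically smaller.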
inline uint16_t LatestSequenceNumber(uint16_t sequence_number1,
uint16_t sequence_number2) {
return IsNewerSequenceNumber(sequence_number1, sequence_number2)

View File

@ -610,13 +610,13 @@ int32_t ModuleFileUtility::ReadWavHeader(InStream& wav)
// special cases?
if(_wavFormatObj.nSamplesPerSec == 44100)
{
_readSizeBytes = 440 * _wavFormatObj.nChannels *
_readSizeBytes = 441 * _wavFormatObj.nChannels *
(_wavFormatObj.nBitsPerSample / 8);
} else if(_wavFormatObj.nSamplesPerSec == 22050) {
_readSizeBytes = 220 * _wavFormatObj.nChannels *
_readSizeBytes = 220 * _wavFormatObj.nChannels * // XXX inexact!
(_wavFormatObj.nBitsPerSample / 8);
} else if(_wavFormatObj.nSamplesPerSec == 11025) {
_readSizeBytes = 110 * _wavFormatObj.nChannels *
_readSizeBytes = 110 * _wavFormatObj.nChannels * // XXX inexact!
(_wavFormatObj.nBitsPerSample / 8);
} else {
_readSizeBytes = (_wavFormatObj.nSamplesPerSec/100) *
@ -678,22 +678,22 @@ int32_t ModuleFileUtility::InitWavCodec(uint32_t samplesPerSec,
{
strcpy(codec_info_.plname, "L16");
_codecId = kCodecL16_16kHz;
codec_info_.pacsize = 110;
codec_info_.plfreq = 11000;
codec_info_.pacsize = 110; // XXX inexact!
codec_info_.plfreq = 11000; // XXX inexact!
}
else if(samplesPerSec == 22050)
{
strcpy(codec_info_.plname, "L16");
_codecId = kCodecL16_16kHz;
codec_info_.pacsize = 220;
codec_info_.plfreq = 22000;
codec_info_.pacsize = 220; // XXX inexact!
codec_info_.plfreq = 22000; // XXX inexact!
}
else if(samplesPerSec == 44100)
{
strcpy(codec_info_.plname, "L16");
_codecId = kCodecL16_16kHz;
codec_info_.pacsize = 440;
codec_info_.plfreq = 44000;
codec_info_.pacsize = 441;
codec_info_.plfreq = 44100;
}
else if(samplesPerSec == 48000)
{
@ -1126,8 +1126,6 @@ int32_t ModuleFileUtility::WriteWavHeader(
{
// Frame size in bytes for 10 ms of audio.
// TODO (hellner): 44.1 kHz has 440 samples frame size. Doesn't seem to
// be taken into consideration here!
int32_t frameSize = (freq / 100) * bytesPerSample * channels;
// Calculate the number of full frames that the wave file contain.
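// Worked numbers behind the read sizes above (mono 16-bit assumed):
// samples per 10 ms = nSamplesPerSec / 100, so 44100 Hz gives exactly
// 441 samples (882 bytes), while 22050 Hz gives 220.5 and 11025 Hz
// gives 110.25; these truncate to 220 and 110, hence the "XXX inexact!"
// markers: each read drifts by a fraction of a sample.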

View File

@ -11,10 +11,6 @@
'../build/common.gypi',
'audio_coding/codecs/cng/cng.gypi',
'audio_coding/codecs/g711/g711.gypi',
'audio_coding/codecs/g722/g722.gypi',
'audio_coding/codecs/ilbc/ilbc.gypi',
'audio_coding/codecs/isac/main/source/isac.gypi',
'audio_coding/codecs/isac/fix/source/isacfix.gypi',
'audio_coding/codecs/pcm16b/pcm16b.gypi',
'audio_coding/main/source/audio_coding_module.gypi',
'audio_coding/neteq/neteq.gypi',
@ -36,13 +32,21 @@
'video_render/video_render.gypi',
],
'conditions': [
['include_g722==1', {
'includes': ['audio_coding/codecs/g722/g722.gypi',],
}],
['include_ilbc==1', {
'includes': ['audio_coding/codecs/ilbc/ilbc.gypi',],
}],
['include_isac==1', {
'includes': ['audio_coding/codecs/isac/main/source/isac.gypi',
'audio_coding/codecs/isac/fix/source/isacfix.gypi',],
}],
['include_opus==1', {
'includes': ['audio_coding/codecs/opus/opus.gypi',],
}],
['include_tests==1', {
'includes': [
'audio_coding/codecs/isac/isac_test.gypi',
'audio_coding/codecs/isac/isacfix_test.gypi',
'audio_processing/audio_processing_tests.gypi',
'rtp_rtcp/test/testFec/test_fec.gypi',
'video_coding/main/source/video_coding_test.gypi',
@ -50,6 +54,12 @@
'video_coding/codecs/test_framework/test_framework.gypi',
'video_coding/codecs/tools/video_codecs_tools.gypi',
], # includes
'conditions': [
['include_isac==1', {
'includes': ['audio_coding/codecs/isac/isac_test.gypi',
'audio_coding/codecs/isac/isacfix_test.gypi',],
}],
],
'variables': {
'conditions': [
# Desktop capturer is supported only on Windows, OSX and Linux.

View File

@ -440,6 +440,17 @@ class RtpRtcp : public Module {
*/
virtual int32_t ResetRTT(const uint32_t remoteSSRC)= 0 ;
/*
* Get time of last rr, as well as packets received remotely
* (derived from rr report + cached sender-side info).
*
* return -1 on failure else 0
*/
virtual int32_t GetReportBlockInfo(const uint32_t remote_ssrc,
uint32_t* ntp_high,
uint32_t* ntp_low,
uint32_t* packets_received,
uint64_t* octets_received) const = 0;
/*
* Force a send of a RTCP packet
* normal SR and RR are triggered via the process function

View File

@ -228,6 +228,27 @@ bool RTCPReceiver::GetAndResetXrRrRtt(uint16_t* rtt_ms) {
return true;
}
int32_t RTCPReceiver::GetReportBlockInfo(uint32_t remoteSSRC,
uint32_t* NTPHigh,
uint32_t* NTPLow,
uint32_t* PacketsReceived,
uint64_t* OctetsReceived) const
{
CriticalSectionScoped lock(_criticalSectionRTCPReceiver);
RTCPReportBlockInformation* reportBlock =
GetReportBlockInformation(remoteSSRC);
if (reportBlock == NULL) {
return -1;
}
*NTPHigh = reportBlock->lastReceivedRRNTPsecs;
*NTPLow = reportBlock->lastReceivedRRNTPfrac;
*PacketsReceived = reportBlock->remotePacketsReceived;
*OctetsReceived = reportBlock->remoteOctetsReceived;
return 0;
}
int32_t
RTCPReceiver::NTP(uint32_t *ReceivedNTPsecs,
uint32_t *ReceivedNTPfrac,
@ -505,8 +526,11 @@ void RTCPReceiver::HandleReportBlock(
// To avoid problem with acquiring _criticalSectionRTCPSender while holding
// _criticalSectionRTCPReceiver.
_criticalSectionRTCPReceiver->Leave();
uint32_t sendTimeMS =
_rtpRtcp.SendTimeOfSendReport(rtcpPacket.ReportBlockItem.LastSR);
uint32_t sendTimeMS = 0;
uint32_t sentPackets = 0;
uint64_t sentOctets = 0;
_rtpRtcp.GetSendReportMetadata(rtcpPacket.ReportBlockItem.LastSR,
&sendTimeMS, &sentPackets, &sentOctets);
_criticalSectionRTCPReceiver->Enter();
RTCPReportBlockInformation* reportBlock =
@ -524,6 +548,12 @@ void RTCPReceiver::HandleReportBlock(
reportBlock->remoteReceiveBlock.fractionLost = rb.FractionLost;
reportBlock->remoteReceiveBlock.cumulativeLost =
rb.CumulativeNumOfPacketsLost;
if (sentPackets > rb.CumulativeNumOfPacketsLost) {
uint32_t packetsReceived = sentPackets - rb.CumulativeNumOfPacketsLost;
reportBlock->remotePacketsReceived = packetsReceived;
reportBlock->remoteOctetsReceived = (sentOctets / sentPackets) *
packetsReceived;
}
if (rb.ExtendedHighestSequenceNumber >
reportBlock->remoteReceiveBlock.extendedHighSeqNum) {
// We have successfully delivered new RTP packets to the remote side after
@ -544,14 +574,15 @@ void RTCPReceiver::HandleReportBlock(
rtcpPacket.ReportBlockItem.DelayLastSR;
// local NTP time when we received this
uint32_t lastReceivedRRNTPsecs = 0;
uint32_t lastReceivedRRNTPfrac = 0;
reportBlock->lastReceivedRRNTPsecs = 0;
reportBlock->lastReceivedRRNTPfrac = 0;
_clock->CurrentNtp(lastReceivedRRNTPsecs, lastReceivedRRNTPfrac);
_clock->CurrentNtp(reportBlock->lastReceivedRRNTPsecs,
reportBlock->lastReceivedRRNTPfrac);
// time when we received this in MS
uint32_t receiveTimeMS = Clock::NtpToMs(lastReceivedRRNTPsecs,
lastReceivedRRNTPfrac);
uint32_t receiveTimeMS = Clock::NtpToMs(reportBlock->lastReceivedRRNTPsecs,
reportBlock->lastReceivedRRNTPfrac);
// Estimate RTT
uint32_t d = (delaySinceLastSendReport & 0x0000ffff) * 1000;
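
The remotePacketsReceived / remoteOctetsReceived bookkeeping added above can only be a coarse estimate, since RRs carry cumulative packet loss but no byte counts. A worked example with invented numbers:

uint32_t sent_packets = 1000;    // cached when the SR was built
uint64_t sent_octets  = 1200000; // ditto
uint32_t cum_lost     = 50;      // CumulativeNumOfPacketsLost from the RR

uint32_t packets_received = sent_packets - cum_lost;       // 950
uint64_t octets_received  = (sent_octets / sent_packets)   // 1200-byte average
                            * packets_received;            // 1140000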

View File

@ -80,6 +80,12 @@ public:
int32_t ResetRTT(const uint32_t remoteSSRC);
int32_t GetReportBlockInfo(uint32_t remoteSSRC,
uint32_t* NTPHigh,
uint32_t* NTPLow,
uint32_t* PacketsReceived,
uint64_t* OctetsReceived) const;
int32_t SenderInfoReceived(RTCPSenderInfo* senderInfo) const;
bool GetAndResetXrRrRtt(uint16_t* rtt_ms);

View File

@ -103,6 +103,10 @@ RTCPPacketInformation::AddReportInfo(
RTCPReportBlockInformation::RTCPReportBlockInformation():
remoteReceiveBlock(),
remoteMaxJitter(0),
remotePacketsReceived(0),
remoteOctetsReceived(0),
lastReceivedRRNTPsecs(0),
lastReceivedRRNTPfrac(0),
RTT(0),
minRTT(0),
maxRTT(0),

View File

@ -32,6 +32,10 @@ public:
// Statistics
RTCPReportBlock remoteReceiveBlock;
uint32_t remoteMaxJitter;
uint32_t remotePacketsReceived;
uint64_t remoteOctetsReceived;
uint32_t lastReceivedRRNTPsecs;
uint32_t lastReceivedRRNTPfrac;
// RTT
uint16_t RTT;

View File

@ -130,6 +130,8 @@ RTCPSender::RTCPSender(const int32_t id,
_lastSendReport(),
_lastRTCPTime(),
_lastSRPacketCount(),
_lastSROctetCount(),
last_xr_rr_(),
@ -164,6 +166,8 @@ RTCPSender::RTCPSender(const int32_t id,
memset(_CNAME, 0, sizeof(_CNAME));
memset(_lastSendReport, 0, sizeof(_lastSendReport));
memset(_lastRTCPTime, 0, sizeof(_lastRTCPTime));
memset(_lastSRPacketCount, 0, sizeof(_lastSRPacketCount));
memset(_lastSROctetCount, 0, sizeof(_lastSROctetCount));
WEBRTC_TRACE(kTraceMemory, kTraceRtpRtcp, id, "%s created", __FUNCTION__);
}
@ -237,6 +241,8 @@ RTCPSender::Init()
memset(_CNAME, 0, sizeof(_CNAME));
memset(_lastSendReport, 0, sizeof(_lastSendReport));
memset(_lastRTCPTime, 0, sizeof(_lastRTCPTime));
memset(_lastSRPacketCount, 0, sizeof(_lastSRPacketCount));
memset(_lastSROctetCount, 0, sizeof(_lastSROctetCount));
last_xr_rr_.clear();
_nackCount = 0;
@ -579,26 +585,32 @@ RTCPSender::LastSendReport( uint32_t& lastRTCPTime)
return _lastSendReport[0];
}
uint32_t
RTCPSender::SendTimeOfSendReport(const uint32_t sendReport)
bool
RTCPSender::GetSendReportMetadata(const uint32_t sendReport,
uint32_t *timeOfSend,
uint32_t *packetCount,
uint64_t *octetCount)
{
CriticalSectionScoped lock(_criticalSectionRTCPSender);
// This is only saved when we are the sender
if((_lastSendReport[0] == 0) || (sendReport == 0))
{
return 0; // will be ignored
return false;
} else
{
for(int i = 0; i < RTCP_NUMBER_OF_SR; ++i)
{
if( _lastSendReport[i] == sendReport)
{
return _lastRTCPTime[i];
*timeOfSend = _lastRTCPTime[i];
*packetCount = _lastSRPacketCount[i];
*octetCount = _lastSROctetCount[i];
return true;
}
}
}
return 0;
return false;
}
bool RTCPSender::SendTimeOfXrRrReport(uint32_t mid_ntp,
@ -689,10 +701,14 @@ int32_t RTCPSender::BuildSR(const FeedbackState& feedback_state,
// shift old
_lastSendReport[i+1] = _lastSendReport[i];
_lastRTCPTime[i+1] =_lastRTCPTime[i];
_lastSRPacketCount[i+1] = _lastSRPacketCount[i];
_lastSROctetCount[i+1] = _lastSROctetCount[i];
}
_lastRTCPTime[0] = Clock::NtpToMs(NTPsec, NTPfrac);
_lastSendReport[0] = (NTPsec << 16) + (NTPfrac >> 16);
_lastSRPacketCount[0] = feedback_state.packet_count_sent;
_lastSROctetCount[0] = feedback_state.byte_count_sent;
// The timestamp of this RTCP packet should be estimated as the timestamp of
// the frame being captured at this moment. We are calculating that
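
For context, the sendReport key stored above is the compact "LastSR" form that an RR later echoes back: the middle 32 bits of the SR's 64-bit NTP timestamp. A sketch (clock source assumed):

uint32_t NTPsec = 0, NTPfrac = 0;
clock->CurrentNtp(NTPsec, NTPfrac);                   // assumed clock handle
uint32_t last_sr = (NTPsec << 16) + (NTPfrac >> 16);  // middle 32 bits
// GetSendReportMetadata() matches the echoed last_sr against the
// RTCP_NUMBER_OF_SR most recent entries to recover the send time and
// the cached packet/octet counters.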

View File

@ -108,7 +108,10 @@ public:
int32_t RemoveMixedCNAME(const uint32_t SSRC);
uint32_t SendTimeOfSendReport(const uint32_t sendReport);
bool GetSendReportMetadata(const uint32_t sendReport,
uint32_t *timeOfSend,
uint32_t *packetCount,
uint64_t *octetCount);
bool SendTimeOfXrRrReport(uint32_t mid_ntp, int64_t* time_ms) const;
@ -305,6 +308,8 @@ private:
// Sent
uint32_t _lastSendReport[RTCP_NUMBER_OF_SR]; // allow packet loss and RTT above 1 sec
uint32_t _lastRTCPTime[RTCP_NUMBER_OF_SR];
uint32_t _lastSRPacketCount[RTCP_NUMBER_OF_SR];
uint64_t _lastSROctetCount[RTCP_NUMBER_OF_SR];
// Sent XR receiver reference time report.
// <mid ntp (mid 32 bits of the 64 bits NTP timestamp), send time in ms>.

View File

@ -0,0 +1,114 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <string.h> // memcpy
#include "webrtc/modules/rtp_rtcp/source/rtp_format_h264.h"
#include "webrtc/system_wrappers/interface/trace.h"
namespace webrtc {
RtpFormatH264::RtpFormatH264(const uint8_t* payload_data,
uint32_t payload_size,
int max_payload_len)
: payload_data_(payload_data),
payload_size_(static_cast<int>(payload_size)),
max_payload_len_(static_cast<int>(max_payload_len)),
fragments_(0),
fragment_size_(0),
next_fragment_(-1) {
if (payload_size_ <= max_payload_len_) {
fragments_ = 0;
} else {
fragment_size_ = max_payload_len_ - kH264FUAHeaderLengthInBytes;
fragments_ = ((payload_size_ - kH264NALHeaderLengthInBytes) + (fragment_size_-1)) /
fragment_size_;
next_fragment_ = 0;
}
}
RtpFormatH264::~RtpFormatH264() {
}
int RtpFormatH264::NextPacket(uint8_t* buffer,
int* bytes_to_send,
bool* last_packet) {
if (next_fragment_ == fragments_) {
*bytes_to_send = 0;
*last_packet = true;
return -1;
}
// TODO(jesup) This supports Mode 1 packetization only
// For mode 0, it's all single-NAL, and maybe deal with that by simply
// setting a large max_payload_len when constructing this (and tell the
// codec to keep generated NAL sizes less than one packet). If the codec
// goes over, a fragmented RTP packet would be sent (and may work or not).
uint8_t header = payload_data_[0];
uint8_t type = header & kH264NAL_TypeMask;
if (payload_size_ <= max_payload_len_) {
// single NAL_UNIT
*bytes_to_send = payload_size_;
// TODO(jesup) - this doesn't work correctly for Mode 0.
// Unfortunately, we don't have a good signal as to which NAL generated by
// the encoder is the last NAL of the frame. We need that to be passed
// through to this point, instead of trying to generate it from the packets
if (type == kH264NALU_SPS || type == kH264NALU_PPS ||
type == kH264NALU_SEI) {
*last_packet = false;
} else {
*last_packet = true;
}
memcpy(buffer, payload_data_, payload_size_);
WEBRTC_TRACE(kTraceStream, kTraceRtpRtcp, -1,
"RtpFormatH264(single NALU with type:%d, payload_size:%d",
type, payload_size_);
return 0;
} else {
uint8_t fu_indicator = (header & (kH264NAL_FBit | kH264NAL_NRIMask)) |
kH264NALU_FUA;
uint8_t fu_header = 0;
bool first_fragment = (next_fragment_ == 0);
bool last_fragment = (next_fragment_ == (fragments_ -1));
// S | E | R | 5 bit type.
fu_header |= (first_fragment ? kH264FU_SBit : 0);
fu_header |= (last_fragment ? kH264FU_EBit :0);
fu_header |= type;
buffer[0] = fu_indicator;
buffer[1] = fu_header;
if (last_fragment) {
// last fragment
*bytes_to_send = payload_size_ -
kH264NALHeaderLengthInBytes -
next_fragment_ * fragment_size_ +
kH264FUAHeaderLengthInBytes;
*last_packet = true;
memcpy(buffer + kH264FUAHeaderLengthInBytes,
payload_data_ + kH264NALHeaderLengthInBytes +
next_fragment_ * fragment_size_,
*bytes_to_send - kH264FUAHeaderLengthInBytes);
// We do not send original NALU header
} else {
*bytes_to_send = fragment_size_ + kH264FUAHeaderLengthInBytes;
*last_packet = false;
memcpy(buffer + kH264FUAHeaderLengthInBytes,
payload_data_ + kH264NALHeaderLengthInBytes +
next_fragment_ * fragment_size_,
fragment_size_); // We do not send original NALU header
}
next_fragment_++;
return 1;
}
}
} // namespace webrtc
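
A worked instance of the constructor's fragment math above (sizes invented):

const int max_payload_len = 1200;               // RTP payload budget
const int payload_size    = 3000;               // one encoded NAL unit
const int kNalHdr = 1, kFuaHdr = 2;             // header lengths used above
int fragment_size = max_payload_len - kFuaHdr;  // 1198 payload bytes per FU-A
int fragments = ((payload_size - kNalHdr) + (fragment_size - 1))
                / fragment_size;                // ceil(2999 / 1198) == 3
// The three FU-A packets carry 1198 + 1198 + 603 bytes of the NAL body;
// the 1-byte NAL header is reconstructed from the FU indicator/header.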

View File

@ -0,0 +1,99 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
* This file contains the declaration of the H264 packetizer class.
* A packetizer object is created for each encoded video frame. The
* constructor is called with the payload data and size, together
* with the maximum RTP payload length. A payload that fits within
* that length is emitted as a single NAL unit packet; larger
* payloads are split into FU-A fragments.
*
* After creating the packetizer, the method NextPacket is called
* repeatedly to get all packets for the frame. It sets its
* last_packet output flag on the final packet; the return value
* distinguishes a single NAL unit (0), an FU-A fragment (1), and
* an error or exhausted payload (-1).
*/
#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H264_H_
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H264_H_
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/constructor_magic.h"
#include "webrtc/typedefs.h"
namespace webrtc {
// Packetizer for H264.
class RtpFormatH264 {
public:
enum {
kH264NALU_SLICE = 1,
kH264NALU_IDR = 5,
kH264NALU_SEI = 6,
kH264NALU_SPS = 7,
kH264NALU_PPS = 8,
kH264NALU_STAPA = 24,
kH264NALU_FUA = 28
};
static const int kH264NALHeaderLengthInBytes = 1;
static const int kH264FUAHeaderLengthInBytes = 2;
// bits for FU (A and B) indicators
enum H264NalDefs {
kH264NAL_FBit = 0x80,
kH264NAL_NRIMask = 0x60,
kH264NAL_TypeMask = 0x1F
};
enum H264FUDefs {
// bits for FU (A and B) headers
kH264FU_SBit = 0x80,
kH264FU_EBit = 0x40,
kH264FU_RBit = 0x20
};
// Initialize with payload from encoder.
// The payload_data must be exactly one encoded H264 frame.
RtpFormatH264(const uint8_t* payload_data,
uint32_t payload_size,
int max_payload_len);
~RtpFormatH264();
// Get the next payload with H264 payload header.
// max_payload_len limits the sum length of payload and H264 payload header.
// buffer is a pointer to where the output will be written.
// bytes_to_send is an output variable that will contain the number of
// bytes written to buffer. Parameter last_packet is true for the last
// packet of the frame, false otherwise (i.e., call the function again
// to get the next packet).
// Returns 0 on success for a single NAL unit,
// 1 on success for an FU-A fragment,
// -1 on error or when no packets remain.
int NextPacket(uint8_t* buffer,
int* bytes_to_send,
bool* last_packet);
private:
const uint8_t* payload_data_;
const int payload_size_;
const int max_payload_len_;
int fragments_;
int fragment_size_;
int next_fragment_;
DISALLOW_COPY_AND_ASSIGN(RtpFormatH264);
};
} // namespace webrtc
#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H264_H_
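
A hypothetical driver loop for this class (buffer sizes invented); the SendH264 routine added later in this patch follows the same shape:

#include "webrtc/modules/rtp_rtcp/source/rtp_format_h264.h"

void PacketizeFrame(const uint8_t* nal, uint32_t nal_size) {
  const int kMaxPayload = 1200;  // assumed RTP payload budget
  webrtc::RtpFormatH264 packetizer(nal, nal_size, kMaxPayload);
  bool last = false;
  while (!last) {
    uint8_t packet[1500];
    int bytes = 0;
    int ret = packetizer.NextPacket(packet, &bytes, &last);
    if (ret < 0)
      break;        // no more data (or error)
    if (ret == 0)
      last = true;  // single-NALU mode emits exactly one packet
    // 'packet'/'bytes' would be handed to the RTP sender here.
  }
}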

View File

@ -448,6 +448,8 @@ class RTPPayloadVideoStrategy : public RTPPayloadStrategy {
RtpVideoCodecTypes videoType = kRtpVideoGeneric;
if (ModuleRTPUtility::StringCompare(payloadName, "VP8", 3)) {
videoType = kRtpVideoVp8;
} else if (ModuleRTPUtility::StringCompare(payloadName, "H264", 4)) {
videoType = kRtpVideoH264;
} else if (ModuleRTPUtility::StringCompare(payloadName, "I420", 4)) {
videoType = kRtpVideoGeneric;
} else if (ModuleRTPUtility::StringCompare(payloadName, "ULPFEC", 6)) {

View File

@ -15,6 +15,7 @@
#include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_h264.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/trace.h"
@ -124,6 +125,8 @@ int32_t RTPReceiverVideo::ParseVideoCodecSpecific(
return ReceiveGenericCodec(rtp_header, payload_data, payload_data_length);
case kRtpVideoVp8:
return ReceiveVp8Codec(rtp_header, payload_data, payload_data_length);
case kRtpVideoH264:
return ReceiveH264Codec(rtp_header, payload_data, payload_data_length);
case kRtpVideoNone:
break;
}
@ -220,6 +223,88 @@ int32_t RTPReceiverVideo::ReceiveVp8Codec(WebRtcRTPHeader* rtp_header,
return 0;
}
int32_t RTPReceiverVideo::ReceiveH264Codec(WebRtcRTPHeader* rtp_header,
const uint8_t* payload_data,
uint16_t payload_data_length) {
// real payload
uint8_t* payload;
uint16_t payload_length;
uint8_t nal_type = payload_data[0] & RtpFormatH264::kH264NAL_TypeMask;
// Note: This code handles only FU-A and single NALU mode packets.
if (nal_type == RtpFormatH264::kH264NALU_FUA) {
// Fragmentation
uint8_t fnri = payload_data[0] &
(RtpFormatH264::kH264NAL_FBit | RtpFormatH264::kH264NAL_NRIMask);
uint8_t original_nal_type = payload_data[1] & RtpFormatH264::kH264NAL_TypeMask;
bool first_fragment = !!(payload_data[1] & RtpFormatH264::kH264FU_SBit);
//bool last_fragment = !!(payload_data[1] & RtpFormatH264::kH264FU_EBit);
uint8_t original_nal_header = fnri | original_nal_type;
if (first_fragment) {
payload = const_cast<uint8_t*> (payload_data) +
RtpFormatH264::kH264NALHeaderLengthInBytes;
payload[0] = original_nal_header;
payload_length = payload_data_length -
RtpFormatH264::kH264NALHeaderLengthInBytes;
} else {
payload = const_cast<uint8_t*> (payload_data) +
RtpFormatH264::kH264FUAHeaderLengthInBytes;
payload_length = payload_data_length -
RtpFormatH264::kH264FUAHeaderLengthInBytes;
}
// WebRtcRTPHeader
if (original_nal_type == RtpFormatH264::kH264NALU_IDR) {
rtp_header->frameType = kVideoFrameKey;
} else {
rtp_header->frameType = kVideoFrameDelta;
}
rtp_header->type.Video.codec = kRtpVideoH264;
rtp_header->type.Video.isFirstPacket = first_fragment;
RTPVideoHeaderH264* h264_header = &rtp_header->type.Video.codecHeader.H264;
h264_header->nalu_header = original_nal_header;
h264_header->single_nalu = false;
} else {
// single NALU
payload = const_cast<uint8_t*> (payload_data);
payload_length = payload_data_length;
rtp_header->type.Video.codec = kRtpVideoH264;
rtp_header->type.Video.isFirstPacket = true;
RTPVideoHeaderH264* h264_header = &rtp_header->type.Video.codecHeader.H264;
h264_header->nalu_header = payload_data[0];
h264_header->single_nalu = true;
// WebRtcRTPHeader
switch (nal_type) {
// TODO(jesup): Evil hack. The jitter buffer *really* doesn't like
// "frames" to have the same timestamps. NOTE: this only works
// for SPS/PPS/IDR, not for PPS/SPS/IDR. Keep this until all issues
// are resolved in the jitter buffer
case RtpFormatH264::kH264NALU_SPS:
rtp_header->header.timestamp -= 10;
// fall through
case RtpFormatH264::kH264NALU_PPS:
rtp_header->header.timestamp -= 10;
// fall through
case RtpFormatH264::kH264NALU_IDR:
rtp_header->frameType = kVideoFrameKey;
break;
default:
rtp_header->frameType = kVideoFrameDelta;
break;
}
}
if (data_callback_->OnReceivedPayloadData(payload,
payload_length,
rtp_header) != 0) {
return -1;
}
return 0;
}
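// Net effect of the fall-through above on a shared frame timestamp T:
//   SPS: T - 20 (it falls through the PPS case), PPS: T - 10, IDR: T,
// so SPS < PPS < IDR and the jitter buffer sees three distinct "frames".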
int32_t RTPReceiverVideo::ReceiveGenericCodec(
WebRtcRTPHeader* rtp_header,
const uint8_t* payload_data,

View File

@ -69,6 +69,10 @@ class RTPReceiverVideo : public RTPReceiverStrategy {
const uint8_t* payload_data,
uint16_t payload_data_length);
int32_t ReceiveH264Codec(WebRtcRTPHeader* rtp_header,
const uint8_t* payload_data,
uint16_t payload_data_length);
int32_t BuildRTPheader(const WebRtcRTPHeader* rtp_header,
uint8_t* data_buffer) const;

View File

@ -82,6 +82,8 @@
'rtp_sender_video.cc',
'rtp_sender_video.h',
'video_codec_information.h',
'rtp_format_h264.cc',
'rtp_format_h264.h',
'rtp_format_vp8.cc',
'rtp_format_vp8.h',
'rtp_format_video_generic.h',

View File

@ -899,6 +899,19 @@ int32_t ModuleRtpRtcpImpl::ResetRTT(const uint32_t remote_ssrc) {
return rtcp_receiver_.ResetRTT(remote_ssrc);
}
int32_t
ModuleRtpRtcpImpl::GetReportBlockInfo(const uint32_t remote_ssrc,
uint32_t* ntp_high,
uint32_t* ntp_low,
uint32_t* packets_received,
uint64_t* octets_received) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceRtpRtcp, id_, "GetReportBlockInfo()");
return rtcp_receiver_.GetReportBlockInfo(remote_ssrc,
ntp_high, ntp_low,
packets_received, octets_received);
}
// Reset RTP data counters for the sending side.
int32_t ModuleRtpRtcpImpl::ResetSendDataCountersRTP() {
WEBRTC_TRACE(kTraceModuleCall, kTraceRtpRtcp, id_,
@ -1529,9 +1542,14 @@ int32_t ModuleRtpRtcpImpl::SendRTCPReferencePictureSelection(
feedback_state, kRtcpRpsi, 0, 0, false, picture_id);
}
uint32_t ModuleRtpRtcpImpl::SendTimeOfSendReport(
const uint32_t send_report) {
return rtcp_sender_.SendTimeOfSendReport(send_report);
bool ModuleRtpRtcpImpl::GetSendReportMetadata(const uint32_t send_report,
uint32_t *time_of_send,
uint32_t *packet_count,
uint64_t *octet_count) {
return rtcp_sender_.GetSendReportMetadata(send_report,
time_of_send,
packet_count,
octet_count);
}
bool ModuleRtpRtcpImpl::SendTimeOfXrRrReport(

View File

@ -178,6 +178,12 @@ class ModuleRtpRtcpImpl : public RtpRtcp {
// Reset RoundTripTime statistics.
virtual int32_t ResetRTT(const uint32_t remote_ssrc) OVERRIDE;
virtual int32_t GetReportBlockInfo(const uint32_t remote_ssrc,
uint32_t* ntp_high,
uint32_t* ntp_low,
uint32_t* packets_received,
uint64_t* octets_received) const OVERRIDE;
// Force a send of an RTCP packet.
// Normal SR and RR are triggered via the process function.
virtual int32_t SendRTCP(uint32_t rtcp_packet_type = kRtcpReport) OVERRIDE;
@ -354,7 +360,10 @@ class ModuleRtpRtcpImpl : public RtpRtcp {
virtual BitrateStatisticsObserver* GetVideoBitrateObserver() const OVERRIDE;
virtual uint32_t SendTimeOfSendReport(const uint32_t send_report);
virtual bool GetSendReportMetadata(const uint32_t send_report,
uint32_t *time_of_send,
uint32_t *packet_count,
uint64_t *octet_count);
virtual bool SendTimeOfXrRrReport(uint32_t mid_ntp, int64_t* time_ms) const;

View File

@ -17,6 +17,7 @@
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/producer_fec.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_h264.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
@ -92,6 +93,8 @@ int32_t RTPSenderVideo::RegisterVideoPayload(
RtpVideoCodecTypes videoType = kRtpVideoGeneric;
if (ModuleRTPUtility::StringCompare(payloadName, "VP8",3)) {
videoType = kRtpVideoVp8;
} else if (ModuleRTPUtility::StringCompare(payloadName, "H264", 4)) {
videoType = kRtpVideoH264;
} else if (ModuleRTPUtility::StringCompare(payloadName, "I420", 4)) {
videoType = kRtpVideoGeneric;
} else {
@ -285,53 +288,60 @@ RTPSenderVideo::SendVideo(const RtpVideoCodecTypes videoType,
const uint32_t payloadSize,
const RTPFragmentationHeader* fragmentation,
VideoCodecInformation* codecInfo,
const RTPVideoTypeHeader* rtpTypeHdr)
{
if( payloadSize == 0)
{
return -1;
}
const RTPVideoTypeHeader* rtpTypeHdr) {
if( payloadSize == 0) {
return -1;
}
if (frameType == kVideoFrameKey) {
producer_fec_.SetFecParameters(&key_fec_params_,
_numberFirstPartition);
} else {
producer_fec_.SetFecParameters(&delta_fec_params_,
_numberFirstPartition);
}
if (frameType == kVideoFrameKey) {
producer_fec_.SetFecParameters(&key_fec_params_,
_numberFirstPartition);
} else {
producer_fec_.SetFecParameters(&delta_fec_params_,
_numberFirstPartition);
}
// Default setting for number of first partition packets:
// Will be extracted in SendVP8 for VP8 codec; other codecs use 0
_numberFirstPartition = 0;
// Default setting for number of first partition packets:
// Will be extracted in SendVP8 for VP8 codec; other codecs use 0
_numberFirstPartition = 0;
int32_t retVal = -1;
switch(videoType)
{
case kRtpVideoGeneric:
retVal = SendGeneric(frameType, payloadType, captureTimeStamp,
capture_time_ms, payloadData, payloadSize);
break;
case kRtpVideoVp8:
retVal = SendVP8(frameType,
payloadType,
captureTimeStamp,
capture_time_ms,
payloadData,
payloadSize,
fragmentation,
rtpTypeHdr);
break;
default:
assert(false);
break;
}
if(retVal <= 0)
{
return retVal;
}
WEBRTC_TRACE(kTraceStream, kTraceRtpRtcp, _id, "%s(timestamp:%u)",
__FUNCTION__, captureTimeStamp);
return 0;
int32_t retVal = -1;
switch(videoType) {
case kRtpVideoGeneric:
retVal = SendGeneric(frameType, payloadType, captureTimeStamp,
capture_time_ms, payloadData, payloadSize);
break;
case kRtpVideoVp8:
retVal = SendVP8(frameType,
payloadType,
captureTimeStamp,
capture_time_ms,
payloadData,
payloadSize,
fragmentation,
rtpTypeHdr);
break;
case kRtpVideoH264:
retVal = SendH264(frameType,
payloadType,
captureTimeStamp,
capture_time_ms,
payloadData,
payloadSize,
fragmentation,
rtpTypeHdr);
break;
default:
assert(false);
break;
}
if(retVal <= 0) {
return retVal;
}
WEBRTC_TRACE(kTraceStream, kTraceRtpRtcp, _id, "%s(timestamp:%u)",
__FUNCTION__, captureTimeStamp);
return 0;
}
int32_t RTPSenderVideo::SendGeneric(const FrameType frame_type,
@ -486,6 +496,52 @@ RTPSenderVideo::SendVP8(const FrameType frameType,
return 0;
}
int32_t RTPSenderVideo::SendH264(const FrameType frameType,
const int8_t payloadType,
const uint32_t captureTimeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
const uint32_t payloadSize,
const RTPFragmentationHeader* fragmentation,
const RTPVideoTypeHeader* rtpTypeHdr) {
const uint16_t rtpHeaderLength = _rtpSender.RTPHeaderLength();
int32_t payloadBytesToSend = payloadSize;
const uint8_t* data = payloadData;
uint16_t maxPayloadLengthH264 = _rtpSender.MaxDataPayloadLength();
RtpFormatH264 packetizer(data, payloadBytesToSend, maxPayloadLengthH264);
StorageType storage = kAllowRetransmission;
bool protect = (frameType == kVideoFrameKey);
bool last = false;
while (!last) {
// Write H264 Payload
uint8_t dataBuffer[IP_PACKET_SIZE] = {0};
int payloadBytesInPacket = 0;
int ret_val = packetizer.NextPacket(&dataBuffer[rtpHeaderLength],
&payloadBytesInPacket, &last);
if (ret_val < 0) {
return -1;
}
// Write RTP header.
// Set marker bit true if this is the last packet in frame.
_rtpSender.BuildRTPheader(dataBuffer, payloadType, last,
captureTimeStamp, capture_time_ms);
if (-1 == SendVideoPacket(dataBuffer, payloadBytesInPacket,
rtpHeaderLength, captureTimeStamp,
capture_time_ms, storage, protect)) {
// Send failures are ignored here; the loop continues with any
// remaining fragments of the frame.
}
if (ret_val == 0) {
// single NAL unit
last = true;
}
}
return 0;
}
void RTPSenderVideo::ProcessBitrate() {
_videoBitrate.Process();
_fecOverheadRate.Process();

View File

@ -111,6 +111,15 @@ private:
const RTPFragmentationHeader* fragmentation,
const RTPVideoTypeHeader* rtpTypeHdr);
int32_t SendH264(const FrameType frameType,
const int8_t payloadType,
const uint32_t captureTimeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
const uint32_t payloadSize,
const RTPFragmentationHeader* fragmentation,
const RTPVideoTypeHeader* rtpTypeHdr);
private:
int32_t _id;
RTPSenderInterface& _rtpSender;

View File

@ -21,7 +21,7 @@
#include <WinSock.h> // timeval
#include <MMSystem.h> // timeGetTime
#elif ((defined WEBRTC_LINUX) || (defined WEBRTC_MAC))
#elif ((defined WEBRTC_LINUX) || (defined WEBRTC_BSD) || (defined WEBRTC_MAC))
#include <sys/time.h> // gettimeofday
#include <time.h>
#endif
@ -96,9 +96,9 @@ uint32_t GetCurrentRTP(Clock* clock, uint32_t freq) {
}
uint32_t ConvertNTPTimeToRTP(uint32_t NTPsec, uint32_t NTPfrac, uint32_t freq) {
float ftemp = (float)NTPfrac / (float)NTP_FRAC;
float ftemp = (float)NTPfrac / (float)NTP_FRAC;
uint32_t tmp = (uint32_t)(ftemp * freq);
return NTPsec * freq + tmp;
return NTPsec * freq + tmp;
}
uint32_t ConvertNTPTimeToMS(uint32_t NTPsec, uint32_t NTPfrac) {
@ -118,7 +118,7 @@ bool StringCompare(const char* str1, const char* str2,
const uint32_t length) {
return (_strnicmp(str1, str2, length) == 0) ? true : false;
}
#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
bool StringCompare(const char* str1, const char* str2,
const uint32_t length) {
return (strncasecmp(str1, str2, length) == 0) ? true : false;

View File

@ -87,7 +87,7 @@ int32_t FilePlayerImpl::Frequency() const
{
return 32000;
}
else if(_codec.plfreq == 44000)
else if(_codec.plfreq == 44100 || _codec.plfreq == 44000 ) // XXX just 44100?
{
return 32000;
}

View File

@ -19,7 +19,7 @@
#if defined(_WIN32)
#include <Windows.h>
#include <mmsystem.h>
#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC) || defined(WEBRTC_BSD)
#include <string.h>
#include <sys/time.h>
#include <time.h>
@ -237,7 +237,7 @@ inline uint32_t RtpDumpImpl::GetTimeInMS() const
{
#if defined(_WIN32)
return timeGetTime();
#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
#elif defined(WEBRTC_LINUX) || defined(WEBRTC_BSD) || defined(WEBRTC_MAC)
struct timeval tv;
struct timezone tz;
unsigned long val;

View File

@ -16,6 +16,9 @@
#include "webrtc/modules/video_capture/device_info_impl.h"
#include "webrtc/modules/video_capture/video_capture_impl.h"
#define AndroidJavaCaptureDeviceInfoClass "org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid"
#define AndroidJavaCaptureCapabilityClass "org/webrtc/videoengine/CaptureCapabilityAndroid"
namespace webrtc
{
namespace videocapturemodule

View File

@ -48,7 +48,7 @@ int32_t DeviceInfoImpl::NumberOfCapabilities(
if (_lastUsedDeviceNameLength == strlen((char*) deviceUniqueIdUTF8))
{
// Is it the same device that is asked for again.
#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
if(strncasecmp((char*)_lastUsedDeviceName,
(char*) deviceUniqueIdUTF8,
_lastUsedDeviceNameLength)==0)
@ -85,7 +85,7 @@ int32_t DeviceInfoImpl::GetCapability(const char* deviceUniqueIdUTF8,
ReadLockScoped cs(_apiLock);
if ((_lastUsedDeviceNameLength != strlen((char*) deviceUniqueIdUTF8))
#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
|| (strncasecmp((char*)_lastUsedDeviceName,
(char*) deviceUniqueIdUTF8,
_lastUsedDeviceNameLength)!=0))
@ -133,7 +133,7 @@ int32_t DeviceInfoImpl::GetBestMatchedCapability(
ReadLockScoped cs(_apiLock);
if ((_lastUsedDeviceNameLength != strlen((char*) deviceUniqueIdUTF8))
#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX) || defined(WEBRTC_BSD)
|| (strncasecmp((char*)_lastUsedDeviceName,
(char*) deviceUniqueIdUTF8,
_lastUsedDeviceNameLength)!=0))

View File

@ -18,7 +18,13 @@
#include <sys/stat.h>
#include <unistd.h>
//v4l includes
#if defined(__NetBSD__) || defined(__OpenBSD__)
#include <sys/videoio.h>
#elif defined(__sun)
#include <sys/videodev2.h>
#else
#include <linux/videodev2.h>
#endif
#include "webrtc/system_wrappers/interface/ref_count.h"
#include "webrtc/system_wrappers/interface/trace.h"
@ -93,9 +99,10 @@ int32_t DeviceInfoLinux::GetDeviceName(
char device[20];
int fd = -1;
bool found = false;
for (int n = 0; n < 64; n++)
int device_index;
for (device_index = 0; device_index < 64; device_index++)
{
sprintf(device, "/dev/video%d", n);
sprintf(device, "/dev/video%d", device_index);
if ((fd = open(device, O_RDONLY)) != -1)
{
if (count == deviceNumber) {
@ -154,6 +161,15 @@ int32_t DeviceInfoLinux::GetDeviceName(
"buffer passed is too small");
return -1;
}
} else {
// If there is no bus info to use for the uniqueId, invent one; it must
// be repeatable across enumerations.
if (snprintf(deviceUniqueIdUTF8, deviceUniqueIdUTF8Length, "fake_%u", device_index) >=
deviceUniqueIdUTF8Length)
{
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
"buffer passed is too small");
return -1;
}
}
return 0;
@ -165,6 +181,7 @@ int32_t DeviceInfoLinux::CreateCapabilityMap(
int fd;
char device[32];
bool found = false;
int device_index;
const int32_t deviceUniqueIdUTF8Length =
(int32_t) strlen((char*) deviceUniqueIdUTF8);
@ -177,40 +194,41 @@ int32_t DeviceInfoLinux::CreateCapabilityMap(
"CreateCapabilityMap called for device %s", deviceUniqueIdUTF8);
/* detect /dev/video [0-63] entries */
for (int n = 0; n < 64; ++n)
if (sscanf(deviceUniqueIdUTF8,"fake_%d",&device_index) == 1)
{
sprintf(device, "/dev/video%d", n);
sprintf(device, "/dev/video%d", device_index);
fd = open(device, O_RDONLY);
if (fd == -1)
continue;
// query device capabilities
struct v4l2_capability cap;
if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
{
if (cap.bus_info[0] != 0)
{
if (strncmp((const char*) cap.bus_info,
(const char*) deviceUniqueIdUTF8,
strlen((const char*) deviceUniqueIdUTF8)) == 0) //match with device id
{
found = true;
break; // fd matches with device unique id supplied
}
}
else //match for device name
{
if (IsDeviceNameMatches((const char*) cap.card,
(const char*) deviceUniqueIdUTF8))
{
found = true;
break;
}
}
if (fd != -1) {
found = true;
}
close(fd); // close since this is not the matching device
}
} else {
/* detect /dev/video [0-63] entries */
for (int n = 0; n < 64; ++n)
{
sprintf(device, "/dev/video%d", n);
fd = open(device, O_RDONLY);
if (fd == -1)
continue;
// query device capabilities
struct v4l2_capability cap;
if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
{
if (cap.bus_info[0] != 0)
{
if (strncmp((const char*) cap.bus_info,
(const char*) deviceUniqueIdUTF8,
strlen((const char*) deviceUniqueIdUTF8)) == 0) //match with device id
{
found = true;
break; // fd matches with device unique id supplied
}
}
// else can't be a match as the test for fake_* above would have matched it
}
close(fd); // close since this is not the matching device
}
}
if (!found)
{
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id, "no matching device found");

View File

@ -10,7 +10,6 @@
#include <errno.h>
#include <fcntl.h>
#include <linux/videodev2.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
@ -18,7 +17,15 @@
#include <sys/stat.h>
#include <unistd.h>
#include <iostream>
//v4l includes
#if defined(__NetBSD__) || defined(__OpenBSD__)
#include <sys/videoio.h>
#elif defined(__sun)
#include <sys/videodev2.h>
#else
#include <linux/videodev2.h>
#endif
#include <new>
#include "webrtc/modules/video_capture/linux/video_capture_linux.h"
@ -71,6 +78,13 @@ int32_t VideoCaptureModuleV4L2::Init(const char* deviceUniqueIdUTF8)
memcpy(_deviceUniqueId, deviceUniqueIdUTF8, len + 1);
}
int device_index;
if (sscanf(deviceUniqueIdUTF8,"fake_%d", &device_index) == 1)
{
_deviceId = device_index;
return 0;
}
int fd;
char device[32];
bool found = false;
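// Round trip of the invented id (illustrative values): enumeration emits
// snprintf(id, len, "fake_%u", 3) when V4L2 reports no bus info, and the
// sscanf above recovers device_index == 3, mapping back to /dev/video3.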

View File

@ -15,6 +15,20 @@
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/trace.h"
class nsAutoreleasePool {
public:
nsAutoreleasePool()
{
mLocalPool = [[NSAutoreleasePool alloc] init];
}
~nsAutoreleasePool()
{
[mLocalPool release];
}
private:
NSAutoreleasePool *mLocalPool;
};
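// Stack-allocating one of these at the top of a method gives every
// Objective-C call in that scope an autorelease pool, regardless of
// which thread invoked it (capture callbacks may run off the main
// thread, which has no pool of its own).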
namespace webrtc
{
@ -41,6 +55,7 @@ VideoCaptureMacQTKit::VideoCaptureMacQTKit(const int32_t id) :
VideoCaptureMacQTKit::~VideoCaptureMacQTKit()
{
nsAutoreleasePool localPool;
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCapture, _id,
"~VideoCaptureMacQTKit() called");
if(_captureDevice)
@ -71,6 +86,8 @@ int32_t VideoCaptureMacQTKit::Init(
_deviceUniqueId = new char[nameLength+1];
memcpy(_deviceUniqueId, iDeviceUniqueIdUTF8,nameLength+1);
nsAutoreleasePool localPool;
_captureDevice = [[VideoCaptureMacQTKitObjC alloc] init];
if(NULL == _captureDevice)
{
@ -164,6 +181,7 @@ int32_t VideoCaptureMacQTKit::StartCapture(
const VideoCaptureCapability& capability)
{
nsAutoreleasePool localPool;
_captureWidth = capability.width;
_captureHeight = capability.height;
_captureFrameRate = capability.maxFPS;
@ -180,6 +198,7 @@ int32_t VideoCaptureMacQTKit::StartCapture(
int32_t VideoCaptureMacQTKit::StopCapture()
{
nsAutoreleasePool localPool;
[_captureDevice stopCapture];
_isCapturing = false;
return 0;

View File

@ -13,6 +13,20 @@
#include "webrtc/modules/video_capture/video_capture_config.h"
#include "webrtc/system_wrappers/interface/trace.h"
class nsAutoreleasePool {
public:
nsAutoreleasePool()
{
mLocalPool = [[NSAutoreleasePool alloc] init];
}
~nsAutoreleasePool()
{
[mLocalPool release];
}
private:
NSAutoreleasePool *mLocalPool;
};
namespace webrtc
{
namespace videocapturemodule
@ -21,13 +35,14 @@ namespace videocapturemodule
VideoCaptureMacQTKitInfo::VideoCaptureMacQTKitInfo(const int32_t id) :
DeviceInfoImpl(id)
{
nsAutoreleasePool localPool;
_captureInfo = [[VideoCaptureMacQTKitInfoObjC alloc] init];
}
VideoCaptureMacQTKitInfo::~VideoCaptureMacQTKitInfo()
{
nsAutoreleasePool localPool;
[_captureInfo release];
}
int32_t VideoCaptureMacQTKitInfo::Init()
@ -39,6 +54,7 @@ int32_t VideoCaptureMacQTKitInfo::Init()
uint32_t VideoCaptureMacQTKitInfo::NumberOfDevices()
{
nsAutoreleasePool localPool;
uint32_t captureDeviceCount =
[[_captureInfo getCaptureDeviceCount]intValue];
return captureDeviceCount;
@ -51,6 +67,7 @@ int32_t VideoCaptureMacQTKitInfo::GetDeviceName(
uint32_t deviceUniqueIdUTF8Length, char* productUniqueIdUTF8,
uint32_t productUniqueIdUTF8Length)
{
nsAutoreleasePool localPool;
int errNum = [[_captureInfo getDeviceNamesFromIndex:deviceNumber
DefaultName:deviceNameUTF8 WithLength:deviceNameLength
AndUniqueID:deviceUniqueIdUTF8
@ -104,6 +121,7 @@ int32_t VideoCaptureMacQTKitInfo::DisplayCaptureSettingsDialogBox(
uint32_t positionX, uint32_t positionY)
{
nsAutoreleasePool localPool;
return [[_captureInfo
displayCaptureSettingsDialogBoxWithDevice:deviceUniqueIdUTF8
AndTitle:dialogTitleUTF8

Some files were not shown because too many files have changed in this diff.