mirror of https://github.com/DrKLO/Telegram.git
synced 2025-01-14 21:53:55 +01:00

Update to 8.0.0 (2406)

This commit is contained in:
parent ab221dafad
commit 368822d20f

191 changed files with 12004 additions and 2919 deletions
@@ -1,4 +1,4 @@
-FROM gradle:6.7.1-jdk11
+FROM gradle:7.0.2-jdk11
 
 ENV ANDROID_SDK_URL https://dl.google.com/android/repository/commandlinetools-linux-7302050_latest.zip
 ENV ANDROID_API_LEVEL android-31
@@ -25,7 +25,7 @@ dependencies {
     compileOnly 'org.checkerframework:checker-qual:2.5.2'
     compileOnly 'org.checkerframework:checker-compat-qual:2.5.0'
     implementation 'com.google.firebase:firebase-messaging:22.0.0'
-    implementation 'com.google.firebase:firebase-config:21.0.0'
+    implementation 'com.google.firebase:firebase-config:21.0.1'
     implementation 'com.google.firebase:firebase-datatransport:18.0.1'
     implementation 'com.google.firebase:firebase-appindexing:20.0.0'
     implementation 'com.google.android.gms:play-services-maps:17.0.1'
@@ -299,7 +299,7 @@ android {
         }
     }

-    defaultConfig.versionCode = 2390
+    defaultConfig.versionCode = 2406

     applicationVariants.all { variant ->
         variant.outputs.all { output ->
@@ -318,7 +318,7 @@ android {
     defaultConfig {
         minSdkVersion 16
         targetSdkVersion 29
-        versionName "7.9.3"
+        versionName "8.0.0"

         vectorDrawables.generatedDensities = ['mdpi', 'hdpi', 'xhdpi', 'xxhdpi']
@@ -1224,7 +1224,7 @@ JNIEXPORT void Java_org_telegram_messenger_Utilities_generateGradient(JNIEnv *en
     float directPixelY;
     float centerDistanceY;
     float centerDistanceY2;
-    int32_t colorsCount = colorsArray[12] == 0 ? 3 : 4;
+    int32_t colorsCount = colorsArray[12] == 0 && colorsArray[13] == 0 && colorsArray[14] == 0 && colorsArray[15] == 0 ? 3 : 4;

     for (int y = 0; y < height; y++) {
         if (pixelCache == nullptr) {
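Note: the old condition inspected only colorsArray[12], the first component of the fourth color, so a fourth color whose first component happened to be 0 was treated as absent. A minimal sketch of the fixed predicate, assuming colorsArray packs four colors as four ints each (indices 12-15 belonging to the fourth color); hasFourthColor is a hypothetical name, not from the diff:

    // Hypothetical helper mirroring the new check: the fourth color slot
    // counts as present if any of its four components is non-zero.
    static bool hasFourthColor(const int32_t *colorsArray) {
        return colorsArray[12] != 0 || colorsArray[13] != 0 ||
               colorsArray[14] != 0 || colorsArray[15] != 0;
    }
    // int32_t colorsCount = hasFourthColor(colorsArray) ? 4 : 3;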
@@ -2499,7 +2499,7 @@ void ConnectionsManager::processRequestQueue(uint32_t connectionTypes, uint32_t
             } else {
                 currentCount = 0;
             }
-            if (!networkAvailable || currentCount >= 12) {
+            if (!networkAvailable || currentCount >= 16) {
                 iter++;
                 continue;
             }
@@ -918,7 +918,9 @@ add_library(tgcalls STATIC
     voip/tgcalls/group/GroupNetworkManager.cpp
     voip/tgcalls/group/GroupInstanceCustomImpl.cpp
     voip/tgcalls/group/GroupJoinPayloadInternal.cpp
-    voip/tgcalls/group/StreamingPart.cpp
+    voip/tgcalls/group/AudioStreamingPart.cpp
+    voip/tgcalls/group/VideoStreamingPart.cpp
+    voip/tgcalls/group/StreamingMediaContext.cpp
     voip/tgcalls/third-party/json11.cpp

     voip/webrtc/rtc_base/async_invoker.cc
@@ -76,10 +76,12 @@ class BroadcastPartTaskJava : public BroadcastPartTask {
 public:
     BroadcastPartTaskJava(std::shared_ptr<PlatformContext> platformContext,
                           std::function<void(BroadcastPart &&)> callback,
-                          int64_t timestamp) :
+                          int64_t timestamp, int32_t videoChannel, VideoChannelDescription::Quality quality) :
             _platformContext(std::move(platformContext)),
             _callback(std::move(callback)),
-            _timestamp(timestamp) {
+            _timestamp(timestamp),
+            _videoChannel(videoChannel),
+            _quality(quality) {
     }

     void call(int64_t ts, int64_t responseTs, BroadcastPart::Status status, uint8_t *data, int32_t len) {
@@ -91,22 +93,48 @@ public:
         part.responseTimestamp = responseTs / 1000.0;
         part.status = status;
         if (data != nullptr) {
-            part.oggData = std::vector<uint8_t>(data, data + len);
+            part.data = std::vector<uint8_t>(data, data + len);
         }
         _callback(std::move<>(part));
     }

+    bool isValidTaskFor(int64_t timestamp, int32_t videoChannel, VideoChannelDescription::Quality quality) {
+        if (_videoChannel == 0) {
+            return _timestamp == timestamp;
+        } else {
+            return _timestamp == timestamp && _videoChannel == videoChannel && _quality == quality;
+        }
+    }
+
 private:
     void cancel() override {
         tgvoip::jni::DoWithJNI([&](JNIEnv *env) {
-            jobject globalRef = ((AndroidContext *) _platformContext.get())->getJavaInstance();
-            env->CallVoidMethod(globalRef, env->GetMethodID(NativeInstanceClass, "onCancelRequestBroadcastPart", "(J)V"), _timestamp);
+            auto context = (AndroidContext *) _platformContext.get();
+            jobject globalRef = context->getJavaInstance();
+            env->CallVoidMethod(globalRef, env->GetMethodID(NativeInstanceClass, "onCancelRequestBroadcastPart", "(JII)V"), _timestamp, _videoChannel, (jint) _quality);
+            if (_videoChannel != 0) {
+                for (auto videoTaskIter = context->videoStreamTasks.begin(); videoTaskIter != context->videoStreamTasks.end(); videoTaskIter++) {
+                    if (((BroadcastPartTaskJava *) videoTaskIter->get())->isValidTaskFor(_timestamp, _videoChannel, _quality)) {
+                        context->videoStreamTasks.erase(videoTaskIter);
+                        break;
+                    }
+                }
+            } else {
+                for (auto audioTaskIter = context->audioStreamTasks.begin(); audioTaskIter != context->audioStreamTasks.end(); audioTaskIter++) {
+                    if (((BroadcastPartTaskJava *) audioTaskIter->get())->isValidTaskFor(_timestamp, _videoChannel, _quality)) {
+                        context->audioStreamTasks.erase(audioTaskIter);
+                        break;
+                    }
+                }
+            }
         });
     }

     std::shared_ptr<PlatformContext> _platformContext;
     std::function<void(BroadcastPart &&)> _callback;
     int64_t _timestamp;
+    int32_t _videoChannel;
+    VideoChannelDescription::Quality _quality;
 };

 class JavaObject {
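Note: cancel() now also unregisters the task from the per-context lists, using isValidTaskFor as the key; audio tasks (_videoChannel == 0) match on timestamp alone, video tasks on the (timestamp, channel, quality) triple. For reference, the same lookup written with <algorithm>, under the assumption that tasks stands for either context->audioStreamTasks or context->videoStreamTasks:

    // Equivalent erase-by-predicate formulation (sketch, not from the diff):
    auto it = std::find_if(tasks.begin(), tasks.end(),
            [&](const std::shared_ptr<BroadcastPartTask> &task) {
        return ((BroadcastPartTaskJava *) task.get())->isValidTaskFor(_timestamp, _videoChannel, _quality);
    });
    if (it != tasks.end()) {
        tasks.erase(it);   // drop exactly one matching task, as the loop does
    }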
@@ -399,12 +427,21 @@ JNIEXPORT jlong JNICALL Java_org_telegram_messenger_voip_NativeInstance_makeGrou
         .platformContext = platformContext
     };
     if (!screencast) {
-        descriptor.requestBroadcastPart = [](std::shared_ptr<PlatformContext> platformContext, int64_t timestamp, int64_t duration, std::function<void(BroadcastPart &&)> callback) -> std::shared_ptr<BroadcastPartTask> {
-            std::shared_ptr<BroadcastPartTask> task = std::make_shared<BroadcastPartTaskJava>(platformContext, callback, timestamp);
-            ((AndroidContext *) platformContext.get())->streamTask = task;
+        descriptor.requestAudioBroadcastPart = [](std::shared_ptr<PlatformContext> platformContext, int64_t timestamp, int64_t duration, std::function<void(BroadcastPart &&)> callback) -> std::shared_ptr<BroadcastPartTask> {
+            std::shared_ptr<BroadcastPartTask> task = std::make_shared<BroadcastPartTaskJava>(platformContext, callback, timestamp, 0, VideoChannelDescription::Quality::Full);
+            ((AndroidContext *) platformContext.get())->audioStreamTasks.push_back(task);
             tgvoip::jni::DoWithJNI([platformContext, timestamp, duration, task](JNIEnv *env) {
                 jobject globalRef = ((AndroidContext *) platformContext.get())->getJavaInstance();
-                env->CallVoidMethod(globalRef, env->GetMethodID(NativeInstanceClass, "onRequestBroadcastPart", "(JJ)V"), timestamp, duration);
+                env->CallVoidMethod(globalRef, env->GetMethodID(NativeInstanceClass, "onRequestBroadcastPart", "(JJII)V"), timestamp, duration, 0, 0);
             });
             return task;
         };
+        descriptor.requestVideoBroadcastPart = [](std::shared_ptr<PlatformContext> platformContext, int64_t timestamp, int64_t duration, int32_t video_channel, VideoChannelDescription::Quality quality, std::function<void(BroadcastPart &&)> callback) -> std::shared_ptr<BroadcastPartTask> {
+            std::shared_ptr<BroadcastPartTask> task = std::make_shared<BroadcastPartTaskJava>(platformContext, callback, timestamp, video_channel, quality);
+            ((AndroidContext *) platformContext.get())->videoStreamTasks.push_back(task);
+            tgvoip::jni::DoWithJNI([platformContext, timestamp, duration, task, video_channel, quality](JNIEnv *env) {
+                jobject globalRef = ((AndroidContext *) platformContext.get())->getJavaInstance();
+                env->CallVoidMethod(globalRef, env->GetMethodID(NativeInstanceClass, "onRequestBroadcastPart", "(JJII)V"), timestamp, duration, video_channel, (jint) quality);
+            });
+            return task;
+        };
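Note: with the extra channel and quality arguments, the Java callback's JNI descriptor changes from "(JJ)V" to "(JJII)V" (J = jlong, I = jint, V = void return). The matching Java-side declaration is not part of this hunk; it would be shaped like onRequestBroadcastPart(long timestamp, long duration, int videoChannel, int quality). The native call pattern used above:

    // The descriptor string must match the Java method's signature exactly.
    jmethodID mid = env->GetMethodID(NativeInstanceClass, "onRequestBroadcastPart", "(JJII)V");
    env->CallVoidMethod(globalRef, mid, (jlong) timestamp, (jlong) duration,
            (jint) video_channel, (jint) quality);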
@@ -812,20 +849,37 @@ JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_stopGroup
     delete instance;
 }

-JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_onStreamPartAvailable(JNIEnv *env, jobject obj, jlong ts, jobject byteBuffer, jint size, jlong responseTs) {
+JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_onStreamPartAvailable(JNIEnv *env, jobject obj, jlong ts, jobject byteBuffer, jint size, jlong responseTs, jint videoChannel, jint quality) {
     InstanceHolder *instance = getInstanceHolder(env, obj);
     if (instance->groupNativeInstance == nullptr) {
         return;
     }
     auto context = (AndroidContext *) instance->_platformContext.get();
-    std::shared_ptr<BroadcastPartTask> streamTask = context->streamTask;
-    auto task = (BroadcastPartTaskJava *) streamTask.get();
+    std::shared_ptr<BroadcastPartTask> task;
+    auto q = (VideoChannelDescription::Quality) quality;
+    if (videoChannel != 0) {
+        for (auto videoTaskIter = context->videoStreamTasks.begin(); videoTaskIter != context->videoStreamTasks.end(); videoTaskIter++) {
+            if (((BroadcastPartTaskJava *) videoTaskIter->get())->isValidTaskFor(ts, videoChannel, q)) {
+                task = *videoTaskIter;
+                context->videoStreamTasks.erase(videoTaskIter);
+                break;
+            }
+        }
+    } else {
+        for (auto audioTaskIter = context->audioStreamTasks.begin(); audioTaskIter != context->audioStreamTasks.end(); audioTaskIter++) {
+            if (((BroadcastPartTaskJava *) audioTaskIter->get())->isValidTaskFor(ts, 0, q)) {
+                task = *audioTaskIter;
+                context->audioStreamTasks.erase(audioTaskIter);
+                break;
+            }
+        }
+    }
     if (task != nullptr) {
         if (byteBuffer != nullptr) {
             auto buf = (uint8_t *) env->GetDirectBufferAddress(byteBuffer);
-            task->call(ts, responseTs, BroadcastPart::Status::Success, buf, size);
+            ((BroadcastPartTaskJava *) task.get())->call(ts, responseTs, BroadcastPart::Status::Success, buf, size);
         } else {
-            task->call(ts, responseTs, size == 0 ? BroadcastPart::Status::NotReady : BroadcastPart::Status::ResyncNeeded, nullptr, 0);
+            ((BroadcastPartTaskJava *) task.get())->call(ts, responseTs, size == 0 ? BroadcastPart::Status::NotReady : BroadcastPart::Status::ResyncNeeded, nullptr, 0);
         }
     }
 }
@@ -886,7 +940,7 @@ JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_destroyVi

 JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_switchCameraCapturer(JNIEnv *env, jclass clazz, jlong videoCapturer, jboolean front) {
     auto capturer = reinterpret_cast<VideoCaptureInterface *>(videoCapturer);
-    capturer->switchToDevice(front ? "front" : "back");
+    capturer->switchToDevice(front ? "front" : "back", false);
 }

 JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_setVideoStateCapturer(JNIEnv *env, jclass clazz, jlong videoCapturer, jint videoState) {
@@ -899,7 +953,7 @@ JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_switchCam
     if (instance->_videoCapture == nullptr) {
         return;
     }
-    instance->_videoCapture->switchToDevice(front ? "front" : "back");
+    instance->_videoCapture->switchToDevice(front ? "front" : "back", false);
 }

 JNIEXPORT jboolean JNICALL Java_org_telegram_messenger_voip_NativeInstance_hasVideoCapturer(JNIEnv *env, jobject obj) {
@@ -204,6 +204,7 @@ public:

     virtual void receiveSignalingData(const std::vector<uint8_t> &data) = 0;
     virtual void setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture) = 0;
+    virtual void sendVideoDeviceUpdated() = 0;
     virtual void setRequestedVideoAspect(float aspect) = 0;

     virtual void stop(std::function<void(FinalState)> completion) = 0;
@@ -58,6 +58,12 @@ void InstanceImpl::setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoC
     });
 }

+void InstanceImpl::sendVideoDeviceUpdated() {
+    _manager->perform(RTC_FROM_HERE, [](Manager *manager) {
+        manager->sendVideoDeviceUpdated();
+    });
+}
+
 void InstanceImpl::setRequestedVideoAspect(float aspect) {
     _manager->perform(RTC_FROM_HERE, [aspect](Manager *manager) {
         manager->setRequestedVideoAspect(aspect);
@@ -21,6 +21,7 @@ public:

     void receiveSignalingData(const std::vector<uint8_t> &data) override;
     void setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture) override;
+    void sendVideoDeviceUpdated() override;
     void setRequestedVideoAspect(float aspect) override;
     void setNetworkType(NetworkType networkType) override;
     void setMuteMicrophone(bool muteMicrophone) override;
@@ -316,6 +316,12 @@ void Manager::setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCaptur
     });
 }

+void Manager::sendVideoDeviceUpdated() {
+    _mediaManager->perform(RTC_FROM_HERE, [](MediaManager *mediaManager) {
+        mediaManager->sendVideoDeviceUpdated();
+    });
+}
+
 void Manager::setRequestedVideoAspect(float aspect) {
     _mediaManager->perform(RTC_FROM_HERE, [aspect](MediaManager *mediaManager) {
         mediaManager->setRequestedVideoAspect(aspect);
@@ -29,6 +29,7 @@ public:
     void start();
     void receiveSignalingData(const std::vector<uint8_t> &data);
     void setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture);
+    void sendVideoDeviceUpdated();
     void setRequestedVideoAspect(float aspect);
     void setMuteOutgoingAudio(bool mute);
     void setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
@@ -641,17 +641,18 @@ void MediaManager::setSendVideo(std::shared_ptr<VideoCaptureInterface> videoCapt
     _videoCapture = videoCapture;
     if (_videoCapture) {
         _videoCapture->setPreferredAspectRatio(_preferredAspectRatio);
-        _isScreenCapture = _videoCapture->isScreenCapture();
-
-        const auto thread = _thread;
-        const auto weak = std::weak_ptr<MediaManager>(shared_from_this());
-        GetVideoCaptureAssumingSameThread(_videoCapture.get())->setStateUpdated([=](VideoState state) {
-            thread->PostTask(RTC_FROM_HERE, [=] {
-                if (const auto strong = weak.lock()) {
-                    strong->setOutgoingVideoState(state);
-                }
-            });
-        });
+
+        const auto thread = _thread;
+        const auto weak = std::weak_ptr<MediaManager>(shared_from_this());
+        const auto object = GetVideoCaptureAssumingSameThread(_videoCapture.get());
+        _isScreenCapture = object->isScreenCapture();
+        object->setStateUpdated([=](VideoState state) {
+            thread->PostTask(RTC_FROM_HERE, [=] {
+                if (const auto strong = weak.lock()) {
+                    strong->setOutgoingVideoState(state);
+                }
+            });
+        });
         setOutgoingVideoState(VideoState::Active);
     } else {
         _isScreenCapture = false;
@@ -681,6 +682,18 @@ void MediaManager::setSendVideo(std::shared_ptr<VideoCaptureInterface> videoCapt
     checkIsReceivingVideoChanged(wasReceiving);
 }

+void MediaManager::sendVideoDeviceUpdated() {
+    if (!computeIsSendingVideo()) {
+        return;
+    }
+    const auto wasScreenCapture = _isScreenCapture;
+    const auto object = GetVideoCaptureAssumingSameThread(_videoCapture.get());
+    _isScreenCapture = object->isScreenCapture();
+    if (_isScreenCapture != wasScreenCapture) {
+        adjustBitratePreferences(true);
+    }
+}
+
 void MediaManager::setRequestedVideoAspect(float aspect) {
     if (_localPreferredVideoAspectRatio != aspect) {
         _localPreferredVideoAspectRatio = aspect;
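Note: sendVideoDeviceUpdated() is threaded through three layers (InstanceImpl -> Manager -> MediaManager), and each hop re-posts the call onto the next object's own thread, so the screen-capture flag is only read on the media thread. The shared pattern, taken from the hunks above:

    // Each layer forwards via perform(), which marshals the lambda onto
    // the target object's thread before running it.
    _manager->perform(RTC_FROM_HERE, [](Manager *manager) {
        manager->sendVideoDeviceUpdated();   // InstanceImpl -> Manager
    });
    // Manager repeats the same pattern towards MediaManager, which then
    // re-reads isScreenCapture() and calls adjustBitratePreferences(true)
    // only when the capture type actually changed.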
@@ -58,6 +58,7 @@ public:
     void setIsConnected(bool isConnected);
     void notifyPacketSent(const rtc::SentPacket &sentPacket);
     void setSendVideo(std::shared_ptr<VideoCaptureInterface> videoCapture);
+    void sendVideoDeviceUpdated();
     void setRequestedVideoAspect(float aspect);
     void setMuteOutgoingAudio(bool mute);
     void setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
@@ -39,8 +39,7 @@ public:

     virtual ~VideoCaptureInterface();

-    virtual bool isScreenCapture() = 0;
-    virtual void switchToDevice(std::string deviceId) = 0;
+    virtual void switchToDevice(std::string deviceId, bool isScreenCapture) = 0;
     virtual void setState(VideoState state) = 0;
     virtual void setPreferredAspectRatio(float aspectRatio) = 0;
     virtual void setOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) = 0;
@@ -9,11 +9,11 @@

 namespace tgcalls {

-VideoCaptureInterfaceObject::VideoCaptureInterfaceObject(std::string deviceId, std::shared_ptr<PlatformContext> platformContext, Threads &threads)
-: _videoSource(PlatformInterface::SharedInstance()->makeVideoSource(threads.getMediaThread(), threads.getWorkerThread(), deviceId == "screen")) {
+VideoCaptureInterfaceObject::VideoCaptureInterfaceObject(std::string deviceId, bool isScreenCapture, std::shared_ptr<PlatformContext> platformContext, Threads &threads)
+: _videoSource(PlatformInterface::SharedInstance()->makeVideoSource(threads.getMediaThread(), threads.getWorkerThread(), isScreenCapture)) {
     _platformContext = platformContext;

-    switchToDevice(deviceId);
+    switchToDevice(deviceId, isScreenCapture);
 }

 VideoCaptureInterfaceObject::~VideoCaptureInterfaceObject() {
@@ -34,13 +34,18 @@ int VideoCaptureInterfaceObject::getRotation() {
     }
 }

-void VideoCaptureInterfaceObject::switchToDevice(std::string deviceId) {
-    if (_videoCapturer && _currentUncroppedSink != nullptr) {
+bool VideoCaptureInterfaceObject::isScreenCapture() {
+    return _isScreenCapture;
+}
+
+void VideoCaptureInterfaceObject::switchToDevice(std::string deviceId, bool isScreenCapture) {
+    if (_videoCapturer) {
         _videoCapturer->setUncroppedOutput(nullptr);
     }
+    _isScreenCapture = isScreenCapture;
     if (_videoSource) {
         //this should outlive the capturer
-        _videoCapturer = NULL;
+        _videoCapturer = nullptr;
         _videoCapturer = PlatformInterface::SharedInstance()->makeVideoCapturer(_videoSource, deviceId, [this](VideoState state) {
             if (this->_stateUpdated) {
                 this->_stateUpdated(state);
@@ -164,23 +169,19 @@ void VideoCaptureInterfaceObject::setRotationUpdated(std::function<void(int)> ro

 VideoCaptureInterfaceImpl::VideoCaptureInterfaceImpl(std::string deviceId, bool isScreenCapture, std::shared_ptr<PlatformContext> platformContext, std::shared_ptr<Threads> threads) :
 _platformContext(platformContext),
-_impl(threads->getMediaThread(), [deviceId, platformContext, threads]() {
-    return new VideoCaptureInterfaceObject(deviceId, platformContext, *threads);
-}), _isScreenCapture(isScreenCapture) {
+_impl(threads->getMediaThread(), [deviceId, isScreenCapture, platformContext, threads]() {
+    return new VideoCaptureInterfaceObject(deviceId, isScreenCapture, platformContext, *threads);
+}) {
 }

 VideoCaptureInterfaceImpl::~VideoCaptureInterfaceImpl() = default;

-void VideoCaptureInterfaceImpl::switchToDevice(std::string deviceId) {
-    _impl.perform(RTC_FROM_HERE, [deviceId](VideoCaptureInterfaceObject *impl) {
-        impl->switchToDevice(deviceId);
+void VideoCaptureInterfaceImpl::switchToDevice(std::string deviceId, bool isScreenCapture) {
+    _impl.perform(RTC_FROM_HERE, [deviceId, isScreenCapture](VideoCaptureInterfaceObject *impl) {
+        impl->switchToDevice(deviceId, isScreenCapture);
     });
 }

-bool VideoCaptureInterfaceImpl::isScreenCapture() {
-    return _isScreenCapture;
-}
-
 void VideoCaptureInterfaceImpl::withNativeImplementation(std::function<void(void *)> completion) {
     _impl.perform(RTC_FROM_HERE, [completion](VideoCaptureInterfaceObject *impl) {
         impl->withNativeImplementation(completion);
@@ -14,10 +14,10 @@ class Threads;

 class VideoCaptureInterfaceObject {
 public:
-    VideoCaptureInterfaceObject(std::string deviceId, std::shared_ptr<PlatformContext> platformContext, Threads &threads);
+    VideoCaptureInterfaceObject(std::string deviceId, bool isScreenCapture, std::shared_ptr<PlatformContext> platformContext, Threads &threads);
     ~VideoCaptureInterfaceObject();

-    void switchToDevice(std::string deviceId);
+    void switchToDevice(std::string deviceId, bool isScreenCapture);
     void withNativeImplementation(std::function<void(void *)> completion);
     void setState(VideoState state);
     void setPreferredAspectRatio(float aspectRatio);
@@ -29,10 +29,11 @@ public:
     void setOnIsActiveUpdated(std::function<void(bool)> onIsActiveUpdated);
     webrtc::VideoTrackSourceInterface *source();
     int getRotation();
+    bool isScreenCapture();

 private:
     void updateAspectRateAdaptation();

     rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> _videoSource;
     std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> _currentUncroppedSink;
     std::shared_ptr<PlatformContext> _platformContext;
@@ -46,6 +47,7 @@ private:
     VideoState _state = VideoState::Active;
     float _preferredAspectRatio = 0.0f;
     bool _shouldBeAdaptedToReceiverAspectRate = true;
+    bool _isScreenCapture = false;
 };

 class VideoCaptureInterfaceImpl : public VideoCaptureInterface {
@@ -53,8 +55,7 @@ public:
     VideoCaptureInterfaceImpl(std::string deviceId, bool isScreenCapture, std::shared_ptr<PlatformContext> platformContext, std::shared_ptr<Threads> threads);
     virtual ~VideoCaptureInterfaceImpl();

-    bool isScreenCapture() override;
-    void switchToDevice(std::string deviceId) override;
+    void switchToDevice(std::string deviceId, bool isScreenCapture) override;
     void withNativeImplementation(std::function<void(void *)> completion) override;
     void setState(VideoState state) override;
     void setPreferredAspectRatio(float aspectRatio) override;
@@ -68,8 +69,6 @@ public:

 private:
     ThreadLocalObject<VideoCaptureInterfaceObject> _impl;

-    bool _isScreenCapture = false;
-
     std::shared_ptr<PlatformContext> _platformContext;

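Note: the net effect of these capture-interface changes is that the screen-capture flag now has a single owner: VideoCaptureInterfaceObject, which lives on the media thread, keeps _isScreenCapture, while VideoCaptureInterfaceImpl merely forwards the flag together with the device switch. From a caller's perspective (sketch; "screen", "front" and "back" are the device ids used elsewhere in this commit):

    // Hypothetical caller after this change: device id and capture type
    // travel in one call, so they cannot get out of sync.
    videoCapture->switchToDevice("screen", true);   // start screen sharing
    videoCapture->switchToDevice("front", false);   // back to the front camera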
@@ -1,4 +1,4 @@
-#include "StreamingPart.h"
+#include "AudioStreamingPart.h"

 #include "rtc_base/logging.h"
 #include "rtc_base/third_party/base64/base64.h"
@@ -10,6 +10,7 @@ extern "C" {
 }

 #include <string>
+#include <bitset>
 #include <set>
 #include <map>

@@ -17,6 +18,28 @@ namespace tgcalls {

 namespace {

+uint32_t stringToUInt32(std::string const &string) {
+    std::stringstream stringStream(string);
+    uint32_t value = 0;
+    stringStream >> value;
+    return value;
+}
+
+template <typename Out>
+void splitString(const std::string &s, char delim, Out result) {
+    std::istringstream iss(s);
+    std::string item;
+    while (std::getline(iss, item, delim)) {
+        *result++ = item;
+    }
+}
+
+std::vector<std::string> splitString(const std::string &s, char delim) {
+    std::vector<std::string> elems;
+    splitString(s, delim, std::back_inserter(elems));
+    return elems;
+}
+
 static absl::optional<uint32_t> readInt32(std::string const &data, int &offset) {
     if (offset + 4 > data.length()) {
         return absl::nullopt;
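Note: the two new helpers are used further down to parse stream metadata. A usage sketch with illustrative values:

    uint32_t mask = stringToUInt32("10");                         // -> 10
    std::vector<std::string> parts = splitString("epA epB", ' ');
    // parts == {"epA", "epB"}

Both rely on std::stringstream/std::istringstream from <sstream>, which this hunk does not add to the include list, so it is presumably available transitively.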
@@ -139,9 +162,9 @@ struct ReadPcmResult {
     int numChannels = 0;
 };

-class StreamingPartInternal {
+class AudioStreamingPartInternal {
 public:
-    StreamingPartInternal(std::vector<uint8_t> &&fileData) :
+    AudioStreamingPartInternal(std::vector<uint8_t> &&fileData) :
     _avIoContext(std::move(fileData)) {
         int ret = 0;

@@ -201,6 +224,31 @@ public:
                 _channelUpdates = parseChannelUpdates(result, offset);
             }
         }
+
+        uint32_t videoChannelMask = 0;
+        entry = av_dict_get(inStream->metadata, "ACTIVE_MASK", nullptr, 0);
+        if (entry && entry->value) {
+            std::string sourceString = (const char *)entry->value;
+            videoChannelMask = stringToUInt32(sourceString);
+        }
+
+        std::vector<std::string> endpointList;
+        entry = av_dict_get(inStream->metadata, "ENDPOINTS", nullptr, 0);
+        if (entry && entry->value) {
+            std::string sourceString = (const char *)entry->value;
+            endpointList = splitString(sourceString, ' ');
+        }
+
+        std::bitset<32> videoChannels(videoChannelMask);
+        size_t endpointIndex = 0;
+        if (videoChannels.count() == endpointList.size()) {
+            for (size_t i = 0; i < videoChannels.size(); i++) {
+                if (videoChannels[i]) {
+                    _endpointMapping.insert(std::make_pair(endpointList[endpointIndex], i));
+                    endpointIndex++;
+                }
+            }
+        }
+    }

         break;
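Note: a worked example of the endpoint-mapping block above, with illustrative metadata values. ACTIVE_MASK = "10" parses to mask 0b1010 (bits 1 and 3 set) and ENDPOINTS = "epA epB" splits into two names; since the popcount (2) equals the endpoint count, the i-th set bit pairs with the i-th name:

    // videoChannelMask = 10 -> videoChannels = 0b1010 -> count() == 2
    // endpointList = {"epA", "epB"}
    // resulting _endpointMapping: {"epA" -> 1, "epB" -> 3}

If the counts disagree, the mapping is simply left empty.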
@@ -233,7 +281,7 @@ public:
         }
     }

-    ~StreamingPartInternal() {
+    ~AudioStreamingPartInternal() {
         if (_frame) {
             av_frame_unref(_frame);
         }
@@ -283,10 +331,14 @@ public:
         return _channelCount;
     }

-    std::vector<ChannelUpdate> const &getChannelUpdates() {
+    std::vector<ChannelUpdate> const &getChannelUpdates() const {
         return _channelUpdates;
     }

+    std::map<std::string, int32_t> getEndpointMapping() const {
+        return _endpointMapping;
+    }
+
 private:
     static int16_t sampleFloatToInt16(float sample) {
         return av_clip_int16 (static_cast<int32_t>(lrint(sample*32767)));
@@ -399,13 +451,14 @@ private:
     int _channelCount = 0;

     std::vector<ChannelUpdate> _channelUpdates;
+    std::map<std::string, int32_t> _endpointMapping;

     std::vector<int16_t> _pcmBuffer;
     int _pcmBufferSampleOffset = 0;
     int _pcmBufferSampleSize = 0;
 };

-class StreamingPartState {
+class AudioStreamingPartState {
     struct ChannelMapping {
         uint32_t ssrc = 0;
         int channelIndex = 0;
@@ -416,7 +469,7 @@ class StreamingPartState {
     };

 public:
-    StreamingPartState(std::vector<uint8_t> &&data) :
+    AudioStreamingPartState(std::vector<uint8_t> &&data) :
     _parsedPart(std::move(data)) {
         if (_parsedPart.getChannelUpdates().size() == 0) {
             _didReadToEnd = true;
@@ -431,14 +484,18 @@ public:
         }
     }

-    ~StreamingPartState() {
+    ~AudioStreamingPartState() {
     }

+    std::map<std::string, int32_t> getEndpointMapping() const {
+        return _parsedPart.getEndpointMapping();
+    }
+
     int getRemainingMilliseconds() const {
         return _remainingMilliseconds;
     }

-    std::vector<StreamingPart::StreamingPartChannel> get10msPerChannel() {
+    std::vector<AudioStreamingPart::StreamingPartChannel> get10msPerChannel() {
         if (_didReadToEnd) {
             return {};
         }
@@ -455,9 +512,9 @@ public:
             return {};
         }

-        std::vector<StreamingPart::StreamingPartChannel> resultChannels;
+        std::vector<AudioStreamingPart::StreamingPartChannel> resultChannels;
         for (const auto ssrc : _allSsrcs) {
-            StreamingPart::StreamingPartChannel emptyPart;
+            AudioStreamingPart::StreamingPartChannel emptyPart;
             emptyPart.ssrc = ssrc;
             resultChannels.push_back(emptyPart);
         }
@@ -509,7 +566,7 @@ private:
     }

 private:
-    StreamingPartInternal _parsedPart;
+    AudioStreamingPartInternal _parsedPart;
     std::set<uint32_t> _allSsrcs;

     std::vector<int16_t> _pcm10ms;
@@ -520,26 +577,30 @@ private:
     bool _didReadToEnd = false;
 };

-StreamingPart::StreamingPart(std::vector<uint8_t> &&data) {
+AudioStreamingPart::AudioStreamingPart(std::vector<uint8_t> &&data) {
     if (!data.empty()) {
-        _state = new StreamingPartState(std::move(data));
+        _state = new AudioStreamingPartState(std::move(data));
     }
 }

-StreamingPart::~StreamingPart() {
+AudioStreamingPart::~AudioStreamingPart() {
     if (_state) {
         delete _state;
     }
 }

-int StreamingPart::getRemainingMilliseconds() const {
+std::map<std::string, int32_t> AudioStreamingPart::getEndpointMapping() const {
+    return _state ? _state->getEndpointMapping() : std::map<std::string, int32_t>();
+}
+
+int AudioStreamingPart::getRemainingMilliseconds() const {
     return _state ? _state->getRemainingMilliseconds() : 0;
 }

-std::vector<StreamingPart::StreamingPartChannel> StreamingPart::get10msPerChannel() {
+std::vector<AudioStreamingPart::StreamingPartChannel> AudioStreamingPart::get10msPerChannel() {
     return _state
         ? _state->get10msPerChannel()
-        : std::vector<StreamingPart::StreamingPartChannel>();
+        : std::vector<AudioStreamingPart::StreamingPartChannel>();
 }

 }
TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPart.h (new file, 41 lines)

@@ -0,0 +1,41 @@
+#ifndef TGCALLS_AUDIO_STREAMING_PART_H
+#define TGCALLS_AUDIO_STREAMING_PART_H
+
+#include "absl/types/optional.h"
+#include <vector>
+#include <map>
+#include <stdint.h>
+
+namespace tgcalls {
+
+class AudioStreamingPartState;
+
+class AudioStreamingPart {
+public:
+    struct StreamingPartChannel {
+        uint32_t ssrc = 0;
+        std::vector<int16_t> pcmData;
+    };
+
+    explicit AudioStreamingPart(std::vector<uint8_t> &&data);
+    ~AudioStreamingPart();
+
+    AudioStreamingPart(const AudioStreamingPart&) = delete;
+    AudioStreamingPart(AudioStreamingPart&& other) {
+        _state = other._state;
+        other._state = nullptr;
+    }
+    AudioStreamingPart& operator=(const AudioStreamingPart&) = delete;
+    AudioStreamingPart& operator=(AudioStreamingPart&&) = delete;
+
+    std::map<std::string, int32_t> getEndpointMapping() const;
+    int getRemainingMilliseconds() const;
+    std::vector<StreamingPartChannel> get10msPerChannel();
+
+private:
+    AudioStreamingPartState *_state = nullptr;
+};
+
+}
+
+#endif
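Note: AudioStreamingPart is a move-only pimpl wrapper: copying is deleted, and the move constructor steals the raw _state pointer so that exactly one wrapper ever deletes it. Usage sketch (the byte source is hypothetical):

    std::vector<uint8_t> partBytes = loadPartBytes();   // hypothetical source
    AudioStreamingPart part(std::move(partBytes));
    AudioStreamingPart moved(std::move(part));          // 'part' now owns nothing
    auto channels = moved.get10msPerChannel();          // next 10 ms of PCM per SSRC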
@@ -35,6 +35,7 @@
 #include "modules/audio_coding/neteq/default_neteq_factory.h"
 #include "modules/audio_coding/include/audio_coding_module.h"
 #include "common_audio/include/audio_util.h"
+#include "modules/audio_device/include/audio_device_data_observer.h"

 #include "AudioFrame.h"
 #include "ThreadLocalObject.h"
@@ -44,9 +45,11 @@
 #include "platform/PlatformInterface.h"
 #include "LogSinkImpl.h"
 #include "CodecSelectHelper.h"
-#include "StreamingPart.h"
+#include "AudioStreamingPart.h"
+#include "VideoStreamingPart.h"
 #include "AudioDeviceHelper.h"
 #include "FakeAudioDeviceModule.h"
+#include "StreamingMediaContext.h"

 #include <mutex>
 #include <random>
@@ -1238,6 +1241,83 @@ std::function<webrtc::VideoTrackSourceInterface*()> videoCaptureToGetVideoSource
 };
 }

+class AudioDeviceDataObserverShared {
+public:
+    AudioDeviceDataObserverShared() {
+    }
+
+    ~AudioDeviceDataObserverShared() {
+    }
+
+    void setStreamingContext(std::shared_ptr<StreamingMediaContext> streamingContext) {
+        _mutex.Lock();
+        _streamingContext = streamingContext;
+        _mutex.Unlock();
+    }
+
+    void mixAudio(int16_t *audio_samples, const size_t num_samples, const size_t num_channels, const uint32_t samples_per_sec) {
+        const auto numSamplesOut = num_samples * num_channels;
+        const auto numBytesOut = sizeof(int16_t) * numSamplesOut;
+        if (samples_per_sec != 48000) {
+            return;
+        }
+
+        if (_buffer.size() < numSamplesOut) {
+            _buffer.resize(numSamplesOut);
+        }
+
+        _mutex.Lock();
+        const auto context = _streamingContext;
+        _mutex.Unlock();
+
+        if (context) {
+            context->getAudio(_buffer.data(), num_samples, num_channels, samples_per_sec);
+            memcpy(audio_samples, _buffer.data(), numBytesOut);
+        }
+    }
+
+private:
+    webrtc::Mutex _mutex;
+    std::vector<int16_t> _buffer;
+    std::shared_ptr<StreamingMediaContext> _streamingContext;
+};
+
+class AudioDeviceDataObserverImpl : public webrtc::AudioDeviceDataObserver {
+public:
+    AudioDeviceDataObserverImpl(std::shared_ptr<AudioDeviceDataObserverShared> shared) :
+    _shared(shared) {
+    }
+
+    virtual ~AudioDeviceDataObserverImpl() {
+    }
+
+    virtual void OnCaptureData(const void* audio_samples,
+                               const size_t num_samples,
+                               const size_t bytes_per_sample,
+                               const size_t num_channels,
+                               const uint32_t samples_per_sec) override {
+    }
+
+    virtual void OnRenderData(const void* audio_samples,
+                              const size_t num_samples,
+                              const size_t bytes_per_sample,
+                              const size_t num_channels,
+                              const uint32_t samples_per_sec) override {
+        if (samples_per_sec != 48000) {
+            return;
+        }
+        if (bytes_per_sample != num_channels * 2) {
+            return;
+        }
+        if (_shared) {
+            _shared->mixAudio((int16_t *)audio_samples, num_samples, num_channels, samples_per_sec);
+        }
+    }
+
+private:
+    std::shared_ptr<AudioDeviceDataObserverShared> _shared;
+};
+
 } // namespace

 class GroupInstanceCustomInternal : public sigslot::has_slots<>, public std::enable_shared_from_this<GroupInstanceCustomInternal> {
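Note: AudioDeviceDataObserverImpl only taps the render path: every 48 kHz playout callback is routed into mixAudio(), which overwrites the output buffer with the broadcast stream's PCM; the Shared object exists so the streaming context can be swapped at runtime under a mutex. This hunk does not show how the observer is attached; WebRTC's audio_device_data_observer.h provides a wrapping factory for this purpose, roughly as follows (sketch; the exact signature varies between WebRTC revisions):

    // Wrap an existing ADM so the observer sees all capture/render data.
    rtc::scoped_refptr<webrtc::AudioDeviceModule> wrapped =
            webrtc::CreateAudioDeviceWithDataObserver(
                rawAdm, std::make_unique<AudioDeviceDataObserverImpl>(shared));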
@@ -1248,7 +1328,9 @@ public:
     _audioLevelsUpdated(descriptor.audioLevelsUpdated),
     _onAudioFrame(descriptor.onAudioFrame),
     _requestMediaChannelDescriptions(descriptor.requestMediaChannelDescriptions),
-    _requestBroadcastPart(descriptor.requestBroadcastPart),
+    _requestCurrentTime(descriptor.requestCurrentTime),
+    _requestAudioBroadcastPart(descriptor.requestAudioBroadcastPart),
+    _requestVideoBroadcastPart(descriptor.requestVideoBroadcastPart),
     _videoCapture(descriptor.videoCapture),
     _videoCaptureSink(new VideoSinkImpl("VideoCapture")),
     _getVideoSource(descriptor.getVideoSource),
@@ -1407,6 +1489,8 @@ public:
         }
 #endif

+        _audioDeviceDataObserverShared = std::make_shared<AudioDeviceDataObserverShared>();
+
         _audioDeviceModule = createAudioDeviceModule();
         if (!_audioDeviceModule) {
             return;
@@ -1763,8 +1847,14 @@ public:
     }

     void updateSsrcAudioLevel(uint32_t ssrc, uint8_t audioLevel, bool isSpeech) {
-        float mappedLevel = ((float)audioLevel) / (float)(0x7f);
-        mappedLevel = (fabs(1.0f - mappedLevel)) * 1.0f;
+        float mappedLevelDb = ((float)audioLevel) / (float)(0x7f);
+
+        //mappedLevelDb = fabs(1.0f - mappedLevelDb);
+        //float mappedLevel = pow(10.0f, mappedLevelDb * 0.1f);
+
+        //printf("mappedLevelDb: %f, mappedLevel: %f\n", mappedLevelDb, mappedLevel);
+
+        float mappedLevel = (fabs(1.0f - mappedLevelDb)) * 1.0f;

         auto it = _audioLevels.find(ChannelId(ssrc));
         if (it != _audioLevels.end()) {
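Note: the RTP audio level (RFC 6464) is a 7-bit value in -dBov, so 0 means loudest and 0x7f (127) means quietest; the code folds it into [0, 1] with 1.0 as full volume. The commented-out pow(10, x * 0.1) line is the exact dB-to-linear conversion, kept for reference but not used:

    float mappedLevelDb = ((float) audioLevel) / (float) (0x7f);  // 0..1, 1 = quietest
    float mappedLevel = fabs(1.0f - mappedLevelDb);               // 1 = loudest
    // audioLevel = 0   -> mappedLevel = 1.0 (full scale)
    // audioLevel = 127 -> mappedLevel = 0.0 (silence)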
@@ -1795,18 +1885,18 @@ public:
                 return;
             }

-            int64_t timestamp = rtc::TimeMillis();
-            int64_t maxSampleTimeout = 400;
+            //int64_t timestamp = rtc::TimeMillis();
+            //int64_t maxSampleTimeout = 400;

             GroupLevelsUpdate levelsUpdate;
             levelsUpdate.updates.reserve(strong->_audioLevels.size() + 1);
             for (auto &it : strong->_audioLevels) {
-                if (it.second.value.level < 0.001f) {
+                /*if (it.second.value.level < 0.001f) {
                     continue;
                 }
                 if (it.second.timestamp <= timestamp - maxSampleTimeout) {
                     continue;
-                }
+                }*/

                 uint32_t effectiveSsrc = it.first.actualSsrc;
                 if (std::find_if(levelsUpdate.updates.begin(), levelsUpdate.updates.end(), [&](GroupLevelUpdate const &item) {
@@ -1825,10 +1915,12 @@ public:
                     }
                 }

-                it.second.value.level *= 0.5f;
-                it.second.value.voice = false;
+                //it.second.value.level *= 0.5f;
+                //it.second.value.voice = false;
             }

+            strong->_audioLevels.clear();
+
             auto myAudioLevel = strong->_myAudioLevel;
             myAudioLevel.isMuted = strong->_isMuted;
             levelsUpdate.updates.push_back(GroupLevelUpdate{ 0, myAudioLevel });
@@ -1906,26 +1998,7 @@ public:
     }

     void updateBroadcastNetworkStatus() {
-        auto timestamp = rtc::TimeMillis();
-
         bool isBroadcastConnected = true;
-        if (_lastBroadcastPartReceivedTimestamp < timestamp - 3000) {
-            isBroadcastConnected = false;
-        }
-
-        if (_broadcastEnabledUntilRtcIsConnectedAtTimestamp) {
-            auto timestamp = rtc::TimeMillis();
-            if (std::abs(timestamp - _broadcastEnabledUntilRtcIsConnectedAtTimestamp.value()) > 3000) {
-                _broadcastEnabledUntilRtcIsConnectedAtTimestamp = absl::nullopt;
-                if (_currentRequestedBroadcastPart) {
-                    if (_currentRequestedBroadcastPart->task) {
-                        _currentRequestedBroadcastPart->task->cancel();
-                    }
-                    _currentRequestedBroadcastPart.reset();
-                }
-                isBroadcastConnected = false;
-            }
-        }

         if (isBroadcastConnected != _isBroadcastConnected) {
             _isBroadcastConnected = isBroadcastConnected;
@@ -1933,214 +2006,6 @@ public:
         }
     }

-    absl::optional<DecodedBroadcastPart> getNextBroadcastPart() {
-        while (true) {
-            if (_sourceBroadcastParts.size() != 0) {
-                auto readChannels = _sourceBroadcastParts[0]->get10msPerChannel();
-                if (readChannels.size() == 0 || readChannels[0].pcmData.size() == 0) {
-                    _sourceBroadcastParts.erase(_sourceBroadcastParts.begin());
-                } else {
-                    std::vector<DecodedBroadcastPart::DecodedBroadcastPartChannel> channels;
-
-                    int numSamples = (int)readChannels[0].pcmData.size();
-
-                    for (auto &readChannel : readChannels) {
-                        DecodedBroadcastPart::DecodedBroadcastPartChannel channel;
-                        channel.ssrc = readChannel.ssrc;
-                        channel.pcmData = std::move(readChannel.pcmData);
-                        channels.push_back(channel);
-                    }
-
-                    absl::optional<DecodedBroadcastPart> decodedPart;
-                    decodedPart.emplace(numSamples, std::move(channels));
-
-                    return decodedPart;
-                }
-            } else {
-                return absl::nullopt;
-            }
-        }
-
-        return absl::nullopt;
-    }
-
-    void commitBroadcastPackets() {
-        int numMillisecondsInQueue = 0;
-        for (const auto &part : _sourceBroadcastParts) {
-            numMillisecondsInQueue += part->getRemainingMilliseconds();
-        }
-
-        int commitMilliseconds = 20;
-        if (numMillisecondsInQueue > 1000) {
-            commitMilliseconds = numMillisecondsInQueue - 1000;
-        }
-
-        std::set<ChannelId> channelsWithActivity;
-
-        for (int msIndex = 0; msIndex < commitMilliseconds; msIndex += 10) {
-            auto packetData = getNextBroadcastPart();
-            if (!packetData) {
-                break;
-            }
-
-            for (const auto &decodedChannel : packetData->channels) {
-                if (decodedChannel.ssrc == _outgoingAudioSsrc) {
-                    continue;
-                }
-
-                ChannelId channelSsrc = ChannelId(decodedChannel.ssrc + 1000, decodedChannel.ssrc);
-                if (_incomingAudioChannels.find(channelSsrc) == _incomingAudioChannels.end()) {
-                    addIncomingAudioChannel(channelSsrc, true);
-                }
-
-                webrtc::RtpPacket packet(nullptr, 12 + decodedChannel.pcmData.size() * 2);
-
-                packet.SetMarker(false);
-                packet.SetPayloadType(112);
-
-                uint16_t packetSeq = 0;
-
-                auto it = _broadcastSeqBySsrc.find(channelSsrc.networkSsrc);
-                if (it == _broadcastSeqBySsrc.end()) {
-                    packetSeq = 1000;
-                    _broadcastSeqBySsrc.insert(std::make_pair(channelSsrc.networkSsrc, packetSeq));
-                } else {
-                    it->second++;
-                    packetSeq = it->second;
-                }
-
-                packet.SetSequenceNumber(packetSeq);
-                packet.SetTimestamp(_broadcastTimestamp);
-                packet.SetSsrc(channelSsrc.networkSsrc);
-
-                uint8_t *payload = packet.SetPayloadSize(decodedChannel.pcmData.size() * 2);
-                memcpy(payload, decodedChannel.pcmData.data(), decodedChannel.pcmData.size() * 2);
-
-                for (int i = 0; i < decodedChannel.pcmData.size() * 2; i += 2) {
-                    auto temp = payload[i];
-                    payload[i] = payload[i + 1];
-                    payload[i + 1] = temp;
-                }
-
-                auto buffer = packet.Buffer();
-                _threads->getWorkerThread()->Invoke<void>(RTC_FROM_HERE, [this, buffer]() {
-                    _call->Receiver()->DeliverPacket(webrtc::MediaType::AUDIO, buffer, -1);
-                });
-
-                channelsWithActivity.insert(ChannelId(channelSsrc));
-            }
-
-            for (auto channelId : channelsWithActivity) {
-                const auto it = _incomingAudioChannels.find(channelId);
-                if (it != _incomingAudioChannels.end()) {
-                    it->second->updateActivity();
-                }
-            }
-
-            _broadcastTimestamp += packetData->numSamples;
-        }
-    }
-
-    void requestNextBroadcastPart() {
-        const auto weak = std::weak_ptr<GroupInstanceCustomInternal>(shared_from_this());
-        auto requestedPartId = _nextBroadcastTimestampMilliseconds;
-        auto task = _requestBroadcastPart(_platformContext, requestedPartId, _broadcastPartDurationMilliseconds, [weak, threads = _threads, requestedPartId](BroadcastPart &&part) {
-            threads->getMediaThread()->PostTask(RTC_FROM_HERE, [weak, part = std::move(part), requestedPartId]() mutable {
-                auto strong = weak.lock();
-                if (!strong) {
-                    return;
-                }
-                if (strong->_currentRequestedBroadcastPart && strong->_currentRequestedBroadcastPart->timestamp == requestedPartId) {
-                    strong->onReceivedNextBroadcastPart(std::move(part));
-                }
-            });
-        });
-        if (_currentRequestedBroadcastPart) {
-            if (_currentRequestedBroadcastPart->task) {
-                _currentRequestedBroadcastPart->task->cancel();
-            }
-            _currentRequestedBroadcastPart.reset();
-        }
-        _currentRequestedBroadcastPart.emplace(requestedPartId, task);
-    }
-
-    void requestNextBroadcastPartWithDelay(int timeoutMs) {
-        const auto weak = std::weak_ptr<GroupInstanceCustomInternal>(shared_from_this());
-        _threads->getMediaThread()->PostDelayedTask(RTC_FROM_HERE, [weak]() {
-            auto strong = weak.lock();
-            if (!strong) {
-                return;
-            }
-
-            strong->requestNextBroadcastPart();
-        }, timeoutMs);
-    }
-
-    void onReceivedNextBroadcastPart(BroadcastPart &&part) {
-        _currentRequestedBroadcastPart.reset();
-
-        if (_connectionMode != GroupConnectionMode::GroupConnectionModeBroadcast && !_broadcastEnabledUntilRtcIsConnectedAtTimestamp) {
-            return;
-        }
-
-        int64_t responseTimestampMilliseconds = (int64_t)(part.responseTimestamp * 1000.0);
-
-        int64_t responseTimestampBoundary = (responseTimestampMilliseconds / _broadcastPartDurationMilliseconds) * _broadcastPartDurationMilliseconds;
-
-        switch (part.status) {
-            case BroadcastPart::Status::Success: {
-                _lastBroadcastPartReceivedTimestamp = rtc::TimeMillis();
-                updateBroadcastNetworkStatus();
-
-                if (std::abs((int64_t)(part.responseTimestamp * 1000.0) - part.timestampMilliseconds) > 2000) {
-                    _nextBroadcastTimestampMilliseconds = std::max(part.timestampMilliseconds + _broadcastPartDurationMilliseconds, responseTimestampBoundary);
-                } else {
-                    _nextBroadcastTimestampMilliseconds = part.timestampMilliseconds + _broadcastPartDurationMilliseconds;
-                }
-                _sourceBroadcastParts.emplace_back(new StreamingPart(std::move(part.oggData)));
-                break;
-            }
-            case BroadcastPart::Status::NotReady: {
-                _nextBroadcastTimestampMilliseconds = part.timestampMilliseconds;
-                break;
-            }
-            case BroadcastPart::Status::ResyncNeeded: {
-                _nextBroadcastTimestampMilliseconds = responseTimestampBoundary;
-                break;
-            }
-            default: {
-                //RTC_FATAL() << "Unknown part.status";
-                break;
-            }
-        }
-
-        int64_t nextDelay = _nextBroadcastTimestampMilliseconds - responseTimestampMilliseconds;
-        int clippedDelay = std::max((int)nextDelay, 100);
-
-        //RTC_LOG(LS_INFO) << "requestNextBroadcastPartWithDelay(" << clippedDelay << ") (from " << nextDelay << ")";
-
-        requestNextBroadcastPartWithDelay(clippedDelay);
-    }
-
-    void beginBroadcastPartsDecodeTimer(int timeoutMs) {
-        const auto weak = std::weak_ptr<GroupInstanceCustomInternal>(shared_from_this());
-        _threads->getMediaThread()->PostDelayedTask(RTC_FROM_HERE, [weak]() {
-            auto strong = weak.lock();
-            if (!strong) {
-                return;
-            }
-
-            if (strong->_connectionMode != GroupConnectionMode::GroupConnectionModeBroadcast && !strong->_broadcastEnabledUntilRtcIsConnectedAtTimestamp) {
-                return;
-            }
-
-            strong->commitBroadcastPackets();
-
-            strong->beginBroadcastPartsDecodeTimer(20);
-        }, timeoutMs);
-    }
-
     void configureVideoParams() {
         if (!_sharedVideoInformation) {
@@ -2320,11 +2185,10 @@ public:

         if (_broadcastEnabledUntilRtcIsConnectedAtTimestamp) {
             _broadcastEnabledUntilRtcIsConnectedAtTimestamp = absl::nullopt;
-            if (_currentRequestedBroadcastPart) {
-                if (_currentRequestedBroadcastPart->task) {
-                    _currentRequestedBroadcastPart->task->cancel();
-                }
-                _currentRequestedBroadcastPart.reset();
+
+            if (_streamingContext) {
+                _streamingContext.reset();
+                _audioDeviceDataObserverShared->setStreamingContext(nullptr);
             }
         }

@@ -2710,11 +2574,9 @@ public:
         if (keepBroadcastIfWasEnabled) {
             _broadcastEnabledUntilRtcIsConnectedAtTimestamp = rtc::TimeMillis();
         } else {
-            if (_currentRequestedBroadcastPart) {
-                if (_currentRequestedBroadcastPart->task) {
-                    _currentRequestedBroadcastPart->task->cancel();
-                }
-                _currentRequestedBroadcastPart.reset();
+            if (_streamingContext) {
+                _streamingContext.reset();
+                _audioDeviceDataObserverShared->setStreamingContext(nullptr);
             }
         }
 }
@@ -2743,12 +2605,50 @@ public:
                 break;
             }
             case GroupConnectionMode::GroupConnectionModeBroadcast: {
-                _broadcastTimestamp = 100001;
-
                 _isBroadcastConnected = false;
