Creating the videoCapturer
First, the function private VideoCapturer createVideoCapturer(int index) in Call.java is executed to create the videoCapturer. The code is as follows:
private VideoCapturer createVideoCapturer(int index) {
VideoCapturer videoCapturer;
if (useCamera2()) {
if (!captureToTexture()) {
return null;
}
Logger.d("useCamera2");
videoCapturer = createCameraCapturer(new Camera2Enumerator(mContext), index);
} else {
Logger.d("useCamera1");
videoCapturer = createCameraCapturer(new Camera1Enumerator(captureToTexture()), index);
}
return videoCapturer;
}
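The helpers useCamera2() and captureToTexture() are project-specific and not shown above. As a hedged sketch (an assumption, not the project's actual code), useCamera2() is commonly implemented with the static Camera2Enumerator.isSupported check:
private boolean useCamera2() {
    // Camera2 needs API 21+ and adequate hardware support; isSupported checks both.
    return Camera2Enumerator.isSupported(mContext);
}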
Taking Camera1 as the example, a Camera1Enumerator is used to create the videoCapturer: the createCameraCapturer function is executed, and inside it the Camera1Enumerator creates the capturer.
private VideoCapturer createCameraCapturer(CameraEnumerator enumerator, int index) {
...
VideoCapturer videoCapturer = enumerator.createCapturer(deviceName, new CameraVideoCapturer.CameraEventsHandler() {
...
The following code in Camera1Enumerator.java finally returns a variable of type Camera1Capturer, so the actual type of videoCapturer is Camera1Capturer.
public CameraVideoCapturer createCapturer(String deviceName, CameraEventsHandler eventsHandler) {
return new Camera1Capturer(deviceName, eventsHandler, this.captureToTexture);
}
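For reference, here is a minimal sketch of how a front-facing camera is usually chosen with an enumerator before calling createCapturer. The class name and the null events handler are illustrative, not from the project:
import org.webrtc.Camera1Enumerator;
import org.webrtc.CameraEnumerator;
import org.webrtc.VideoCapturer;

public final class CapturerPicker {
    // Prefer a front-facing device; fall back to the first device found.
    public static VideoCapturer pickFrontCamera(boolean captureToTexture) {
        CameraEnumerator enumerator = new Camera1Enumerator(captureToTexture);
        String[] deviceNames = enumerator.getDeviceNames();
        for (String deviceName : deviceNames) {
            if (enumerator.isFrontFacing(deviceName)) {
                return enumerator.createCapturer(deviceName, null /* eventsHandler */);
            }
        }
        return deviceNames.length > 0
                ? enumerator.createCapturer(deviceNames[0], null /* eventsHandler */)
                : null;
    }
}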
Creating the source and track, and starting camera capture
First, the createPeerConnectionInternal function in PeerConnectionClient.java is executed; inside it, mediaStream.addTrack(createVideoTrack(videoCapturer)); performs the actual creation of the source and track.
private void createPeerConnectionInternal(EglBase.Context renderEGLContext) {
...
if (peerConnectionParameters.streamMode != StreamMode.RECV_ONLY) {
mediaStream = factory.createLocalMediaStream("ROBOT");
if (videoCallEnabled) {
mediaStream.addTrack(
createVideoTrack(videoCapturer)
);
setRenderVideoTrack();
if (bottomVideoCapturer != null) {
mediaStream.addTrack(
createBottomVideoTrack(bottomVideoCapturer)
);
setBottomRenderVideoTrack();
}
}
mediaStream.addTrack(createAudioTrack());
peerConnection.addStream(mediaStream);
if (videoCallEnabled) {
findVideoSender();
}
}
...
Next, createVideoTrack is executed:
private VideoTrack createVideoTrack(VideoCapturer capturer) {
videoSource = factory.createVideoSource(capturer);
capturer.startCapture(videoWidth, videoHeight, videoFps);
localVideoTrack = factory.createVideoTrack(FORWARD_VIDEO_TRACK_ID, videoSource);
localVideoTrack.setEnabled(renderVideo);
return localVideoTrack;
}
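As a side note, the capturer started in createVideoTrack follows the usual VideoCapturer lifecycle. A hedged sketch (the format values are illustrative, not the project's):
videoCapturer.startCapture(1280, 720, 30);        // frames start flowing into videoSource
videoCapturer.changeCaptureFormat(640, 480, 15);  // optional runtime renegotiation
try {
    videoCapturer.stopCapture();                  // blocks until the camera session stops
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
}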
The PeerConnectionFactory class creates the videoSource:
public VideoSource createVideoSource(VideoCapturer capturer) {
final EglBase.Context eglContext =
localEglbase == null ? null : localEglbase.getEglBaseContext();
final SurfaceTextureHelper surfaceTextureHelper =
SurfaceTextureHelper.create(VIDEO_CAPTURER_THREAD_NAME, eglContext);
long nativeAndroidVideoTrackSource =
nativeCreateVideoSource(nativeFactory, surfaceTextureHelper, capturer.isScreencast());
VideoCapturer.CapturerObserver capturerObserver =
new AndroidVideoTrackSourceObserver(nativeAndroidVideoTrackSource);
capturer.initialize(
surfaceTextureHelper, ContextUtils.getApplicationContext(), capturerObserver);
return new VideoSource(nativeAndroidVideoTrackSource);
}
As shown, the PeerConnectionFactory class calls the native function nativeCreateVideoSource to create a variable of type AndroidVideoTrackSource, and the pointer to it is stored in a long. See nativeCreateVideoSource in video_jni.cc:
JNI_FUNCTION_DECLARATION(jlong,
PeerConnectionFactory_nativeCreateVideoSource,
JNIEnv* jni,
jclass,
jlong native_factory,
jobject j_surface_texture_helper,
jboolean is_screencast) {
OwnedFactoryAndThreads* factory =
reinterpret_cast<OwnedFactoryAndThreads*>(native_factory);
rtc::scoped_refptr<AndroidVideoTrackSource> source(
new rtc::RefCountedObject<AndroidVideoTrackSource>(
factory->signaling_thread(), jni, j_surface_texture_helper,
is_screencast));
rtc::scoped_refptr<VideoTrackSourceProxy> proxy_source =
VideoTrackSourceProxy::Create(factory->signaling_thread(),
factory->worker_thread(), source);
return (jlong)proxy_source.release();
}
Back in PeerConnectionFactory's createVideoSource function, the following lines run next. A callback of type AndroidVideoTrackSourceObserver is created, and the AndroidVideoTrackSource object is passed in through its constructor, so that frames received by the AndroidVideoTrackSourceObserver can be forwarded to the AndroidVideoTrackSource.
The AndroidVideoTrackSourceObserver is then registered with the CameraCapturer to receive frames.
Finally, capturer.initialize is called to perform initialization.
VideoCapturer.CapturerObserver capturerObserver =
new AndroidVideoTrackSourceObserver(nativeAndroidVideoTrackSource);
capturer.initialize(
surfaceTextureHelper, ContextUtils.getApplicationContext(), capturerObserver);
return new VideoSource(nativeAndroidVideoTrackSource);
The initialize function in CameraCapturer.java:
@Override
public void initialize(SurfaceTextureHelper surfaceTextureHelper, Context applicationContext,
CapturerObserver capturerObserver) {
this.applicationContext = applicationContext;
this.capturerObserver = capturerObserver;
this.surfaceHelper = surfaceTextureHelper;
this.cameraThreadHandler =
surfaceTextureHelper == null ? null : surfaceTextureHelper.getHandler();
}
Back in the createVideoTrack function of PeerConnectionClient.java, the following line runs to start frame capture:
capturer.startCapture(videoWidth, videoHeight, videoFps);
Let's walk through the call chain.
The startCapture function in CameraCapturer.java:
public void startCapture(int width, int height, int framerate) {
...
createSessionInternal(0, null /* mediaRecorder */);
...
The createSessionInternal function in CameraCapturer.java:
private void createSessionInternal(int delayMs, final MediaRecorder mediaRecorder) {
uiThreadHandler.postDelayed(openCameraTimeoutRunnable, delayMs + OPEN_CAMERA_TIMEOUT);
cameraThreadHandler.postDelayed(new Runnable() {
@Override
public void run() {
createCameraSession(createSessionCallback, cameraSessionEventsHandler, applicationContext,
surfaceHelper, mediaRecorder, cameraName, width, height, framerate);
}
}, delayMs);
}
The createCameraSession function in Camera1Capturer.java:
@Override
protected void createCameraSession(CameraSession.CreateSessionCallback createSessionCallback,
CameraSession.Events events, Context applicationContext,
SurfaceTextureHelper surfaceTextureHelper, MediaRecorder mediaRecorder, String cameraName,
int width, int height, int framerate) {
Camera1Session.create(createSessionCallback, events,
captureToTexture || (mediaRecorder != null), applicationContext, surfaceTextureHelper,
mediaRecorder, Camera1Enumerator.getCameraIndex(cameraName), width, height, framerate);
}
The create function in Camera1Session.java:
This function does several things:
1. Opens the camera.
2. Sets camera parameters, including the frame rate.
3. Sets the preview texture on the camera.
4. Creates the Camera1Session.
public static void create(final CreateSessionCallback callback, final Events events,
final boolean captureToTexture, final Context applicationContext,
final SurfaceTextureHelper surfaceTextureHelper, final MediaRecorder mediaRecorder,
final int cameraId, final int width, final int height, final int framerate) {
final long constructionTimeNs = System.nanoTime();
Logging.d(TAG, "Open camera " + cameraId);
events.onCameraOpening();
final android.hardware.Camera camera;
try {
camera = android.hardware.Camera.open(cameraId);
} catch (RuntimeException e) {
callback.onFailure(FailureType.ERROR, e.getMessage());
return;
}
try {
camera.setPreviewTexture(surfaceTextureHelper.getSurfaceTexture());
} catch (IOException e) {
camera.release();
callback.onFailure(FailureType.ERROR, e.getMessage());
return;
}
final android.hardware.Camera.CameraInfo info = new android.hardware.Camera.CameraInfo();
android.hardware.Camera.getCameraInfo(cameraId, info);
final android.hardware.Camera.Parameters parameters = camera.getParameters();
final CaptureFormat captureFormat =
findClosestCaptureFormat(parameters, width, height, framerate);
final Size pictureSize = findClosestPictureSize(parameters, width, height);
updateCameraParameters(camera, parameters, captureFormat, pictureSize, captureToTexture);
if (!captureToTexture) {
final int frameSize = captureFormat.frameSize();
for (int i = 0; i < NUMBER_OF_CAPTURE_BUFFERS; ++i) {
final ByteBuffer buffer = ByteBuffer.allocateDirect(frameSize);
camera.addCallbackBuffer(buffer.array());
}
}
// Calculate orientation manually and send it as CVO instead.
camera.setDisplayOrientation(0 /* degrees */);
callback.onDone(
new Camera1Session(events, captureToTexture, applicationContext, surfaceTextureHelper,
mediaRecorder, cameraId, camera, info, captureFormat, constructionTimeNs));
}
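findClosestCaptureFormat and findClosestPictureSize pick the supported camera mode nearest to the request. A hypothetical helper showing the usual nearest-size scoring idea (not WebRTC's exact implementation, which also weighs frame-rate ranges):
import android.hardware.Camera;
import java.util.List;

final class SizePicker {
    // Picks the supported size minimizing the absolute difference from the request.
    static Camera.Size closest(List<Camera.Size> sizes, int width, int height) {
        Camera.Size best = null;
        int bestScore = Integer.MAX_VALUE;
        for (Camera.Size size : sizes) {
            int score = Math.abs(size.width - width) + Math.abs(size.height - height);
            if (score < bestScore) {
                bestScore = score;
                best = size;
            }
        }
        return best;
    }
}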
The Camera1Session constructor in Camera1Session.java:
private Camera1Session(Events events, boolean captureToTexture, Context applicationContext,
SurfaceTextureHelper surfaceTextureHelper, MediaRecorder mediaRecorder, int cameraId,
android.hardware.Camera camera, android.hardware.Camera.CameraInfo info,
CaptureFormat captureFormat, long constructionTimeNs) {
...
startCapturing();
...
The startCapturing function in Camera1Session.java. It mainly does two things:
1. Calls camera.startPreview(); to start the camera capturing frames.
2. Calls listenForBytebufferFrames(); to listen for incoming frames.
private void startCapturing() {
...
if (captureToTexture) {
listenForTextureFrames();
} else {
listenForBytebufferFrames();
}
try {
camera.startPreview();
} catch (RuntimeException e) {
stopInternal();
events.onCameraError(this, e.getMessage());
}
...
The listenForBytebufferFrames function in Camera1Session.java. It delivers each captured frame back through a callback, converting the YUV format when necessary and passing the capture timestamp along.
private void listenForBytebufferFrames() {
camera.setPreviewCallbackWithBuffer(new android.hardware.Camera.PreviewCallback() {
@Override
public void onPreviewFrame(final byte[] data, android.hardware.Camera callbackCamera) {
checkIsOnCameraThread();
if (callbackCamera != camera) {
Logging.e(TAG, "Callback from a different camera. This should never happen.");
return;
}
if (state != SessionState.RUNNING) {
Logging.d(TAG, "Bytebuffer frame captured but camera is no longer running.");
return;
}
final long captureTimeNs = TimeUnit.MILLISECONDS.toNanos(SystemClock.elapsedRealtime());
if (!firstFrameReported) {
final int startTimeMs =
(int) TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - constructionTimeNs);
camera1StartTimeMsHistogram.addSample(startTimeMs);
firstFrameReported = true;
}
if (videoFrameEmitTrialEnabled) {
VideoFrame.Buffer frameBuffer = new NV21Buffer(data, captureFormat.width,
captureFormat.height, () -> cameraThreadHandler.post(() -> {
if (state == SessionState.RUNNING) {
camera.addCallbackBuffer(data);
}
}));
final VideoFrame frame =
new VideoFrame(frameBuffer, getFrameOrientation(), captureTimeNs);
events.onFrameCaptured(Camera1Session.this, frame);
frame.release();
} else {
events.onByteBufferFrameCaptured(Camera1Session.this, data, captureFormat.width,
captureFormat.height, getFrameOrientation(), captureTimeNs);
camera.addCallbackBuffer(data);
}
}
});
}
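A note on the callback buffers allocated earlier in Camera1Session.create: each one must hold a complete NV21 frame. A sketch of the arithmetic behind captureFormat.frameSize(), assuming the standard 12-bits-per-pixel NV21 layout:
import android.graphics.ImageFormat;

final class Nv21 {
    // Y plane: width * height bytes; interleaved VU plane: width * height / 2 bytes,
    // i.e. 12 bits per pixel in total.
    static int frameSize(int width, int height) {
        return width * height * ImageFormat.getBitsPerPixel(ImageFormat.NV21) / 8;
    }
}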
The onFrameCaptured function in CameraCapturer.java:
@Override
public void onFrameCaptured(CameraSession session, VideoFrame frame) {
checkIsOnCameraThread();
synchronized (stateLock) {
if (session != currentSession) {
Logging.w(TAG, "onTextureFrameCaptured from another session.");
return;
}
if (!firstFrameObserved) {
eventsHandler.onFirstFrameAvailable();
firstFrameObserved = true;
}
cameraStatistics.addFrame();
capturerObserver.onFrameCaptured(frame);
}
}
The onFrameCaptured function in AndroidVideoTrackSourceObserver.java:
@Override
public void onFrameCaptured(VideoFrame frame) {
nativeOnFrameCaptured(nativeSource, frame.getBuffer().getWidth(), frame.getBuffer().getHeight(),
frame.getRotation(), frame.getTimestampNs(), frame.getBuffer());
}
The JNI_FUNCTION_DECLARATION in androidvideotracksource_jni.cc:
JNI_FUNCTION_DECLARATION(void,
AndroidVideoTrackSourceObserver_nativeOnFrameCaptured,
JNIEnv* jni,
jclass,
jlong j_source,
jint j_width,
jint j_height,
jint j_rotation,
jlong j_timestamp_ns,
jobject j_video_frame_buffer) {
AndroidVideoTrackSource* source =
AndroidVideoTrackSourceFromJavaProxy(j_source);
source->OnFrameCaptured(jni, j_width, j_height, j_timestamp_ns,
jintToVideoRotation(j_rotation),
j_video_frame_buffer);
}
The OnFrameCaptured function of the AndroidVideoTrackSource class. This function:
1. Crops the frame when necessary.
2. Passes the data further on.
void AndroidVideoTrackSource::OnFrameCaptured(JNIEnv* jni,
int width,
int height,
int64_t timestamp_ns,
VideoRotation rotation,
jobject j_video_frame_buffer) {
RTC_DCHECK(camera_thread_checker_.CalledOnValidThread());
int64_t camera_time_us = timestamp_ns / rtc::kNumNanosecsPerMicrosec;
int64_t translated_camera_time_us =
timestamp_aligner_.TranslateTimestamp(camera_time_us, rtc::TimeMicros());
int adapted_width;
int adapted_height;
int crop_width;
int crop_height;
int crop_x;
int crop_y;
//The frame may be cropped here.
if (!AdaptFrame(width, height, camera_time_us, &adapted_width,
&adapted_height, &crop_width, &crop_height, &crop_x,
&crop_y)) {
return;
}
rtc::scoped_refptr<VideoFrameBuffer> buffer =
AndroidVideoBuffer::Create(jni, j_video_frame_buffer)
->CropAndScale(jni, crop_x, crop_y, crop_width, crop_height,
adapted_width, adapted_height);
// AdaptedVideoTrackSource handles applying rotation for I420 frames.
if (apply_rotation() && rotation != kVideoRotation_0) {
buffer = buffer->ToI420();
}
OnFrame(VideoFrame(buffer, rotation, translated_camera_time_us));
}
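AdaptFrame reports a crop rectangle plus the scaled output size. Assuming the adapter center-crops to the target aspect ratio before scaling, which is how VideoAdapter-style code typically behaves, the crop coordinates can be derived as in this hypothetical helper:
final class CropMath {
    // Returns {cropX, cropY, cropWidth, cropHeight}: the largest centered region
    // of (width, height) that matches the aspect ratio of (targetW, targetH).
    static int[] centerCrop(int width, int height, int targetW, int targetH) {
        int cropWidth = Math.min(width, height * targetW / targetH);
        int cropHeight = Math.min(height, width * targetH / targetW);
        return new int[] {(width - cropWidth) / 2, (height - cropHeight) / 2,
                cropWidth, cropHeight};
    }
}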
The OnFrame function in the AdaptedVideoTrackSource class:
void AdaptedVideoTrackSource::OnFrame(const webrtc::VideoFrame& frame) {
rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer(
frame.video_frame_buffer());
/* Note that this is a "best effort" approach to
wants.rotation_applied; apply_rotation_ can change from false to
true between the check of apply_rotation() and the call to
broadcaster_.OnFrame(), in which case we generate a frame with
pending rotation despite some sink with wants.rotation_applied ==
true was just added. The VideoBroadcaster enforces
synchronization for us in this case, by not passing the frame on
to sinks which don't want it. */
if (apply_rotation() && frame.rotation() != webrtc::kVideoRotation_0 &&
buffer->type() == webrtc::VideoFrameBuffer::Type::kI420) {
/* Apply pending rotation. */
broadcaster_.OnFrame(webrtc::VideoFrame(
webrtc::I420Buffer::Rotate(*buffer->GetI420(), frame.rotation()),
webrtc::kVideoRotation_0, frame.timestamp_us()));
} else {
broadcaster_.OnFrame(frame);
}
}
The OnFrame function of the VideoBroadcaster class. It walks sink_pairs() to find the registered sinks and hands the frame to each of them. The encoder is one of these sinks; this is where it receives its frames.
void VideoBroadcaster::OnFrame(const webrtc::VideoFrame& frame) {
rtc::CritScope cs(&sinks_and_wants_lock_);
for (auto& sink_pair : sink_pairs()) {
if (sink_pair.wants.rotation_applied &&
frame.rotation() != webrtc::kVideoRotation_0) {
// Calls to OnFrame are not synchronized with changes to the sink wants.
// When rotation_applied is set to true, one or a few frames may get here
// with rotation still pending. Protect sinks that don't expect any
// pending rotation.
RTC_LOG(LS_VERBOSE) << "Discarding frame with unexpected rotation.";
continue;
}
if (sink_pair.wants.black_frames) {
sink_pair.sink->OnFrame(webrtc::VideoFrame(
GetBlackFrameBuffer(frame.width(), frame.height()), frame.rotation(),
frame.timestamp_us()));
} else {
sink_pair.sink->OnFrame(frame);
}
}
}
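Since VideoBroadcaster fans frames out to every registered sink, an application can observe the same captured frames the encoder sees by adding its own sink. A sketch for newer WebRTC Android SDKs, where VideoTrack exposes addSink (the class name here is illustrative):
import org.webrtc.VideoFrame;
import org.webrtc.VideoSink;

final class FrameCounterSink implements VideoSink {
    private int frameCount;

    @Override
    public void onFrame(VideoFrame frame) {
        // Invoked on the source's delivery thread; must not block.
        frameCount++;
    }
}
Usage would be along the lines of localVideoTrack.addSink(new FrameCounterSink());.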
Connecting encoder initialization with the capture module:
Encoder initialization starts with the creation of the PeerConnectionFactory.
The createPeerConnectionFactoryInternal function in PeerConnectionClient.java:
private void createPeerConnectionFactoryInternal(Context context) {
...
//Since hardware acceleration is used, a DefaultVideoEncoderFactory is created here to produce hardware encoders.
if (peerConnectionParameters.videoCodecHwAcceleration) {
encoderFactory = new DefaultVideoEncoderFactory(
rootEglBase.getEglBaseContext(), true /* enableIntelVp8Encoder */, enableH264HighProfile);
decoderFactory = null;//new DefaultVideoDecoderFactory(rootEglBase.getEglBaseContext());
} else {
encoderFactory = new SoftwareVideoEncoderFactory();
decoderFactory = new SoftwareVideoDecoderFactory();
}
factory = new PeerConnectionFactory(options, encoderFactory, decoderFactory);
...
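Whether the hardware path actually offers the desired codec can be checked up front: the Java VideoEncoderFactory interface exposes getSupportedCodecs(). A small hedged sketch:
import org.webrtc.VideoCodecInfo;
import org.webrtc.VideoEncoderFactory;

final class CodecProbe {
    static void logSupportedCodecs(VideoEncoderFactory factory) {
        for (VideoCodecInfo info : factory.getSupportedCodecs()) {
            android.util.Log.d("CodecProbe", "Supported encoder: " + info.name);
        }
    }
}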
The PeerConnectionFactory constructor. It creates the native PeerConnectionFactory and stores the pointer to it in a long:
public PeerConnectionFactory(
Options options, VideoEncoderFactory encoderFactory, VideoDecoderFactory decoderFactory) {
checkInitializeHasBeenCalled();
nativeFactory = nativeCreatePeerConnectionFactory(options, encoderFactory, decoderFactory);
if (nativeFactory == 0) {
throw new RuntimeException("Failed to initialize PeerConnectionFactory!");
}
}
The native PeerConnectionFactory is actually created in peerconnectionfactory_jni.cc:
JNI_FUNCTION_DECLARATION(
jlong,
PeerConnectionFactory_nativeCreatePeerConnectionFactory,
JNIEnv* jni,
jclass,
jobject joptions,
jobject jencoder_factory,
jobject jdecoder_factory) {
return CreatePeerConnectionFactoryForJava(jni, joptions, jencoder_factory,
jdecoder_factory,
CreateAudioProcessing());
}
The CreatePeerConnectionFactoryForJava function in peerconnectionfactory_jni.cc:
jlong CreatePeerConnectionFactoryForJava(
JNIEnv* jni,
jobject joptions,
jobject jencoder_factory,
jobject jdecoder_factory,
rtc::scoped_refptr<AudioProcessing> audio_processor) {
...
// This uses the new API, does not automatically include software codecs.
std::unique_ptr<VideoEncoderFactory> video_encoder_factory = nullptr;
if (jencoder_factory == nullptr) {
legacy_video_encoder_factory = CreateLegacyVideoEncoderFactory();
video_encoder_factory = std::unique_ptr<VideoEncoderFactory>(
WrapLegacyVideoEncoderFactory(legacy_video_encoder_factory));
} else {
// The actual type of video_encoder_factory is VideoEncoderFactoryWrapper,
// a wrapper that bridges C++ and Java.
video_encoder_factory = std::unique_ptr<VideoEncoderFactory>(
CreateVideoEncoderFactory(jni, jencoder_factory));
}
//Since the jdecoder_factory passed in is nullptr, CreateLegacyVideoDecoderFactory
//is called here to create a legacy_video_decoder_factory. Stepping into
//CreateLegacyVideoDecoderFactory shows that it news up an object of type
//MediaCodecVideoDecoderFactory, and the decoders that factory creates are of type
//MediaCodecVideoDecoder, i.e. hardware decoders. So WebRTC on Android uses
//HW (hardware) decoding; encoding is HW as well.
std::unique_ptr<VideoDecoderFactory> video_decoder_factory = nullptr;
if (jdecoder_factory == nullptr) {
legacy_video_decoder_factory = CreateLegacyVideoDecoderFactory();
video_decoder_factory = std::unique_ptr<VideoDecoderFactory>(
WrapLegacyVideoDecoderFactory(legacy_video_decoder_factory));
} else {
video_decoder_factory = std::unique_ptr<VideoDecoderFactory>(
CreateVideoDecoderFactory(jni, jdecoder_factory));
}
//The type created by CreateMediaEngine is
//CompositeMediaEngine<WebRtcVoiceEngine, WebRtcVideoEngine>
//(declared in mediaengine.h).
//video_encoder_factory and video_decoder_factory are passed in and used to
//create the encoders and decoders.
rtc::scoped_refptr<AudioDeviceModule> adm_scoped = nullptr;
media_engine.reset(CreateMediaEngine(
adm_scoped, audio_encoder_factory, audio_decoder_factory,
std::move(video_encoder_factory), std::move(video_decoder_factory),
audio_mixer, audio_processor));
...
We have now seen how video_encoder_factory and video_decoder_factory are created; next we look at how the video encoder itself is created and how it connects to the capture module.
In peerconnection.cc there is a function PeerConnection::SetLocalDescription, which is called when the SDP produced during WebRTC's CreateOffer or CreateAnswer is applied.
void PeerConnection::SetLocalDescription(
SetSessionDescriptionObserver* observer,
SessionDescriptionInterface* desc) {
...
// Takes the ownership of |desc_temp|. On success, local_description() is
// updated to reflect the description that was passed in.
if (!SetCurrentOrPendingLocalDescription(std::move(desc_temp), &error)) {
PostSetSessionDescriptionFailure(observer, error);
return;
}
...
The SetCurrentOrPendingLocalDescription function of the PeerConnection class:
bool PeerConnection::SetCurrentOrPendingLocalDescription(
std::unique_ptr<SessionDescriptionInterface> desc,
std::string* err_desc) {
...
// Transport and Media channels will be created only when offer is set.
if (action == kOffer && !CreateChannels(local_description()->description())) {
// TODO(mallinath) - Handle CreateChannel failure, as new local description
// is applied. Restore back to old description.
return BadLocalSdp(local_description()->type(), kCreateChannelFailed,
err_desc);
}
...
The CreateChannels function of the PeerConnection class:
bool PeerConnection::CreateChannels(const SessionDescription* desc) {
...
const cricket::ContentInfo* video = cricket::GetFirstVideoContent(desc);
if (video && !video->rejected && !video_channel()) {
if (!CreateVideoChannel(video,
GetBundleTransportName(video, bundle_group))) {
RTC_LOG(LS_ERROR) << "Failed to create video channel.";
return false;
}
}
...
The CreateVideoChannel function of the PeerConnection class:
// TODO(steveanton): Perhaps this should be managed by the RtpTransceiver.
bool PeerConnection::CreateVideoChannel(const cricket::ContentInfo* content,
const std::string* bundle_transport) {
...
cricket::VideoChannel* video_channel = channel_manager()->CreateVideoChannel(
call_.get(), configuration_.media_config, rtp_dtls_transport,
rtcp_dtls_transport, transport_controller_->signaling_thread(),
content->name, SrtpRequired(), video_options_);
//Store the video_channel.
GetVideoTransceiver()->internal()->SetChannel(video_channel);
...
The CreateVideoChannel function of the ChannelManager class:
VideoChannel* ChannelManager::CreateVideoChannel(
webrtc::Call* call,
const cricket::MediaConfig& media_config,
DtlsTransportInternal* rtp_transport,
DtlsTransportInternal* rtcp_transport,
rtc::Thread* signaling_thread,
const std::string& content_name,
bool srtp_required,
const VideoOptions& options) {
return worker_thread_->Invoke<VideoChannel*>(RTC_FROM_HERE, [&] {
return CreateVideoChannel_w(
call, media_config, rtp_transport, rtcp_transport, rtp_transport,
rtcp_transport, signaling_thread, content_name, srtp_required, options);
});
}
The CreateVideoChannel_w function of the ChannelManager class:
VideoChannel* ChannelManager::CreateVideoChannel_w(
webrtc::Call* call,
const cricket::MediaConfig& media_config,
DtlsTransportInternal* rtp_dtls_transport,
DtlsTransportInternal* rtcp_dtls_transport,
rtc::PacketTransportInternal* rtp_packet_transport,
rtc::PacketTransportInternal* rtcp_packet_transport,
rtc::Thread* signaling_thread,
const std::string& content_name,
bool srtp_required,
const VideoOptions& options) {
RTC_DCHECK_RUN_ON(worker_thread_);
RTC_DCHECK(initialized_);
RTC_DCHECK(call);
RTC_DCHECK(media_engine_);
//The actual type of media_engine_ is
//CompositeMediaEngine<WebRtcVoiceEngine, WebRtcVideoEngine>
//(declared in mediaengine.h).
VideoMediaChannel* media_channel = media_engine_->CreateVideoChannel(
call, media_config, options);
if (!media_channel) {
return nullptr;
}
//Create a video_channel of type VideoChannel; it wraps the media_channel of type WebRtcVideoChannel.
auto video_channel = rtc::MakeUnique<VideoChannel>(
worker_thread_, network_thread_, signaling_thread,
rtc::WrapUnique(media_channel), content_name,
rtcp_packet_transport == nullptr, srtp_required);
video_channel->Init_w(rtp_dtls_transport, rtcp_dtls_transport,
rtp_packet_transport, rtcp_packet_transport);
VideoChannel* video_channel_ptr = video_channel.get();
video_channels_.push_back(std::move(video_channel));
return video_channel_ptr;
}
CreateVideoChannel of CompositeMediaEngine (in mediaengine.h) simply forwards to the video engine:
virtual VideoMediaChannel* CreateVideoChannel(webrtc::Call* call,
const MediaConfig& config,
const VideoOptions& options) {
return video().CreateChannel(call, config, options);
}
WebRtcVideoEngine's CreateChannel then creates the WebRtcVideoChannel:
WebRtcVideoChannel* WebRtcVideoEngine::CreateChannel(
webrtc::Call* call,
const MediaConfig& config,
const VideoOptions& options) {
RTC_LOG(LS_INFO) << "CreateChannel. Options: " << options.ToString();
return new WebRtcVideoChannel(call, config, options, encoder_factory_.get(), decoder_factory_.get());
}
This seemingly unremarkable chain of calls shows how the WebRtcVideoChannel is created, and the WebRtcVideoChannel is the bridge through which the encoder is created and initialized.
Back in the SetCurrentOrPendingLocalDescription function of the PeerConnection class:
bool PeerConnection::SetCurrentOrPendingLocalDescription(
std::unique_ptr<SessionDescriptionInterface> desc,
std::string* err_desc) {
...
if (!UpdateSessionState(action, cricket::CS_LOCAL, err_desc)) {
return false;
}
...
The UpdateSessionState function of the PeerConnection class:
bool PeerConnection::UpdateSessionState(Action action,
cricket::ContentSource source,
std::string* err_desc) {
...
if (!PushdownMediaDescription(cricket::CA_OFFER, source, err_desc)) {
SetError(ERROR_CONTENT, *err_desc);
}
...
The PushdownMediaDescription function of the PeerConnection class:
bool PeerConnection::PushdownMediaDescription(cricket::ContentAction action,
cricket::ContentSource source,
std::string* err) {
...
for (auto* channel : Channels()) {
// TODO(steveanton): Add support for multiple channels of the same type.
const ContentInfo* content_info =
cricket::GetFirstMediaContent(sdesc->contents(), channel->media_type());
if (!content_info) {
continue;
}
const MediaContentDescription* content_desc =
static_cast<const MediaContentDescription*>(content_info->description);
if (content_desc && !content_info->rejected) {
bool success = (source == cricket::CS_LOCAL)
? channel->SetLocalContent(content_desc, action, err)
: channel->SetRemoteContent(content_desc, action, err);
if (!success) {
all_success = false;
break;
}
}
}
...
The SetLocalContent function of the BaseChannel class:
bool BaseChannel::SetLocalContent(const MediaContentDescription* content,
ContentAction action,
std::string* error_desc) {
TRACE_EVENT0("webrtc", "BaseChannel::SetLocalContent");
return InvokeOnWorker<bool>(
RTC_FROM_HERE,
Bind(&BaseChannel::SetLocalContent_w, this, content, action, error_desc));
}
The SetLocalContent_w function of the VideoChannel class:
bool VideoChannel::SetLocalContent_w(const MediaContentDescription* content,
ContentAction action,
std::string* error_desc) {
...
// TODO(pthatcher): Move local streams into VideoSendParameters, and
// only give it to the media channel once we have a remote
// description too (without a remote description, we won't be able
// to send them anyway).
if (!UpdateLocalStreams_w(video->streams(), action, error_desc)) {
SafeSetError("Failed to set local video description streams.", error_desc);
return false;
}
...
The UpdateLocalStreams_w function of the BaseChannel class:
bool BaseChannel::UpdateLocalStreams_w(const std::vector<StreamParams>& streams,
ContentAction action,
std::string* error_desc) {
...
// Check for new streams.
for (StreamParamsVec::const_iterator it = streams.begin();
it != streams.end(); ++it) {
if (!GetStreamBySsrc(local_streams_, it->first_ssrc())) {
if (media_channel()->AddSendStream(*it)) {
RTC_LOG(LS_INFO) << "Add send stream ssrc: " << it->ssrcs[0];
} else {
std::ostringstream desc;
desc << "Failed to add send stream ssrc: " << it->first_ssrc();
SafeSetError(desc.str(), error_desc);
ret = false;
}
}
}
...
The AddSendStream function of the WebRtcVideoChannel class:
bool WebRtcVideoChannel::AddSendStream(const StreamParams& sp) {
...
//Create a WebRtcVideoSendStream and store it in send_streams_.
WebRtcVideoSendStream* stream = new WebRtcVideoSendStream(
call_, sp, std::move(config), default_send_options_, encoder_factory_,
video_config_.enable_cpu_overuse_detection,
bitrate_config_.max_bitrate_bps, send_codec_, send_rtp_extensions_,
send_params_);
uint32_t ssrc = sp.first_ssrc();
RTC_DCHECK(ssrc != 0);
send_streams_[ssrc] = stream;
...
The WebRtcVideoSendStream constructor:
WebRtcVideoChannel::WebRtcVideoSendStream::WebRtcVideoSendStream(
...
if (codec_settings) {
bool force_encoder_allocation = false;
SetCodec(*codec_settings, force_encoder_allocation);
}
...
The SetCodec function of the WebRtcVideoChannel::WebRtcVideoSendStream class:
void WebRtcVideoChannel::WebRtcVideoSendStream::SetCodec(
const VideoCodecSettings& codec_settings,
bool force_encoder_allocation) {
...
// Do not re-create encoders of the same type. We can't overwrite
// |allocated_encoder_| immediately, because we need to release it after the
// RecreateWebRtcStream() call.
std::unique_ptr<webrtc::VideoEncoder> new_encoder;
if (force_encoder_allocation || !allocated_encoder_ ||
allocated_codec_ != codec_settings.codec) {
const webrtc::SdpVideoFormat format(codec_settings.codec.name,
codec_settings.codec.params);
//The encoder new_encoder is created here; it is eventually stored in the
//external_encoder_ member of VCMCodecDataBase. The type of new_encoder is
//VideoEncoderWrapper, a wrapper around HardwareVideoEncoder,
//created by VideoEncoderFactoryWrapper.
new_encoder = encoder_factory_->CreateVideoEncoder(format);
parameters_.config.encoder_settings.encoder = new_encoder.get();
const webrtc::VideoEncoderFactory::CodecInfo info =
encoder_factory_->QueryVideoEncoder(format);
parameters_.config.encoder_settings.full_overuse_time =
info.is_hardware_accelerated;
parameters_.config.encoder_settings.internal_source =
info.has_internal_source;
} else {
new_encoder = std::move(allocated_encoder_);
}
RecreateWebRtcStream();
....
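On the Java side, encoder_factory_->CreateVideoEncoder(format) eventually lands in VideoEncoderFactory.createEncoder(VideoCodecInfo). A hedged sketch of a delegating factory (the class name is hypothetical) that makes this call visible, for example to log which encoder gets created:
import org.webrtc.VideoCodecInfo;
import org.webrtc.VideoEncoder;
import org.webrtc.VideoEncoderFactory;

final class LoggingEncoderFactory implements VideoEncoderFactory {
    private final VideoEncoderFactory delegate;

    LoggingEncoderFactory(VideoEncoderFactory delegate) {
        this.delegate = delegate;
    }

    @Override
    public VideoEncoder createEncoder(VideoCodecInfo info) {
        android.util.Log.d("EncoderFactory", "createEncoder: " + info.name);
        return delegate.createEncoder(info); // e.g. a HardwareVideoEncoder underneath
    }

    @Override
    public VideoCodecInfo[] getSupportedCodecs() {
        return delegate.getSupportedCodecs();
    }
}
Wrapping the DefaultVideoEncoderFactory with it before passing it to the PeerConnectionFactory constructor is enough to trace encoder creation.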
The RecreateWebRtcStream function of the WebRtcVideoChannel::WebRtcVideoSendStream class. It has two main steps:
1. Call CreateVideoSendStream on the Call object to create a VideoSendStream object.
2. Call SetSource to set the video source; this eventually calls VideoTrack's AddOrUpdateSink to register a sink object, and that sink is a VideoStreamEncoder.
void WebRtcVideoChannel::WebRtcVideoSendStream::RecreateWebRtcStream() {
...
webrtc::VideoSendStream::Config config = parameters_.config.Copy();
//The actual type of the created stream_ is VideoSendStream.
stream_ = call_->CreateVideoSendStream(std::move(config),
parameters_.encoder_config.Copy());
parameters_.encoder_config.encoder_specific_settings = NULL;
//SetSource sets the video source; it eventually calls VideoTrack's
//AddOrUpdateSink to register a sink object, and that sink is a
//VideoStreamEncoder. When frames arrive they are handed to the
//VideoStreamEncoder and finally to the encoder.
if (source_) {
stream_->SetSource(this, GetDegradationPreference());
}
...
Let's look at encoder initialization first, then at how capture and encoding are connected.
The CreateVideoSendStream function of the Call class:
webrtc::VideoSendStream* Call::CreateVideoSendStream(
webrtc::VideoSendStream::Config config,
VideoEncoderConfig encoder_config) {
VideoSendStream* send_stream = new VideoSendStream(
num_cpu_cores_, module_process_thread_.get(), &worker_queue_,
call_stats_.get(), transport_send_.get(), bitrate_allocator_.get(),
video_send_delay_stats_.get(), event_log_, std::move(config),
std::move(encoder_config), suspended_video_send_ssrcs_,
suspended_video_payload_states_);
video_send_streams_.insert(send_stream);
The VideoSendStream constructor.
It mainly creates the VideoStreamEncoder and VideoSendStreamImpl objects, then calls ReconfigureVideoEncoder to initialize the encoder.
The VideoSendStreamImpl object is created in the Run function of the ConstructionTask.
VideoSendStream::VideoSendStream(
int num_cpu_cores,
ProcessThread* module_process_thread,
rtc::TaskQueue* worker_queue,
CallStats* call_stats,
RtpTransportControllerSendInterface* transport,
BitrateAllocator* bitrate_allocator,
SendDelayStats* send_delay_stats,
RtcEventLog* event_log,
VideoSendStream::Config config,
VideoEncoderConfig encoder_config,
const std::map<uint32_t, RtpState>& suspended_ssrcs,
const std::map<uint32_t, RtpPayloadState>& suspended_payload_states)
video_stream_encoder_.reset(
new VideoStreamEncoder(num_cpu_cores, &stats_proxy_,
config_.encoder_settings,
config_.pre_encode_callback,
config_.post_encode_callback,
std::unique_ptr<OveruseFrameDetector>()));
worker_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(new ConstructionTask(
&send_stream_, &thread_sync_event_, &stats_proxy_,
video_stream_encoder_.get(), module_process_thread, call_stats, transport,
bitrate_allocator, send_delay_stats, event_log, &config_,
encoder_config.max_bitrate_bps, suspended_ssrcs, suspended_payload_states,
encoder_config.content_type)));
ReconfigureVideoEncoder(std::move(encoder_config));
VideoStreamEncoder::VideoStreamEncoder(
uint32_t number_of_cores,
SendStatisticsProxy* stats_proxy,
const VideoSendStream::Config::EncoderSettings& settings,
rtc::VideoSinkInterface<VideoFrame>* pre_encode_callback,
EncodedFrameObserver* encoder_timing,
std::unique_ptr<OveruseFrameDetector> overuse_detector)
: shutdown_event_(true /* manual_reset */, false),
number_of_cores_(number_of_cores),
initial_rampup_(0),
//Mainly initializes the source_proxy_ and video_sender_ objects.
source_proxy_(new VideoSourceProxy(this)),
sink_(nullptr),
settings_(settings),
codec_type_(PayloadStringToCodecType(settings.payload_name)),
video_sender_(Clock::GetRealTimeClock(), this),
VideoSender::VideoSender(Clock* clock, EncodedImageCallback* post_encode_callback)
: _encoder(nullptr),
_mediaOpt(clock),
//Mainly initializes the _encodedFrameCallback and _codecDataBase objects.
//post_encode_callback is a VideoStreamEncoder object.
//_encodedFrameCallback is of type VCMEncodedFrameCallback.
_encodedFrameCallback(post_encode_callback, &_mediaOpt),
post_encode_callback_(post_encode_callback),
//_encodedFrameCallback (of type VCMEncodedFrameCallback) holds the
//VideoStreamEncoder object as its callback. This callback is later registered with the encoder to receive the encoded data.
_codecDataBase(&_encodedFrameCallback),
Back in the VideoStreamEncoder constructor:
VideoStreamEncoder::VideoStreamEncoder(
video_sender_.RegisterExternalEncoder(
settings_.encoder, settings_.payload_type,
settings_.internal_source);
// Register an external encoder object.
// This can not be used together with external encoder callbacks.
void VideoSender::RegisterExternalEncoder(
VideoEncoder* externalEncoder,
uint8_t payloadType,
bool internalSource /*= false*/) {
//The encoder object is ultimately stored in the external_encoder_ member of
//VCMCodecDataBase. The type of externalEncoder is VideoEncoderWrapper,
//a wrapper around HardwareVideoEncoder, created by VideoEncoderFactoryWrapper.
_codecDataBase.RegisterExternalEncoder(externalEncoder, payloadType,
internalSource);
Back in the VideoSendStream constructor, ReconfigureVideoEncoder(std::move(encoder_config)); is executed to initialize the encoder.
The ReconfigureVideoEncoder function of the VideoSendStream class:
void VideoSendStream::ReconfigureVideoEncoder(VideoEncoderConfig config) {
// TODO(perkj): Some test cases in VideoSendStreamTest call
// ReconfigureVideoEncoder from the network thread.
// RTC_DCHECK_RUN_ON(&thread_checker_);
RTC_DCHECK(content_type_ == config.content_type);
video_stream_encoder_->ConfigureEncoder(std::move(config),
config_.rtp.max_packet_size,
config_.rtp.nack.rtp_history_ms > 0);
}
The ConfigureEncoder function of VideoStreamEncoder:
void VideoStreamEncoder::ConfigureEncoder(VideoEncoderConfig config,
size_t max_data_payload_length,
bool nack_enabled) {
encoder_queue_.PostTask(
std::unique_ptr<rtc::QueuedTask>(new ConfigureEncoderTask(
this, std::move(config), max_data_payload_length, nack_enabled)));
}
The ConfigureEncoderOnTaskQueue function of VideoStreamEncoder:
void VideoStreamEncoder::ConfigureEncoderOnTaskQueue(
VideoEncoderConfig config,
size_t max_data_payload_length,
bool nack_enabled) {
// Reconfigure the encoder now if the encoder has an internal source or
// if the frame resolution is known. Otherwise, the reconfiguration is
// deferred until the next frame to minimize the number of reconfigurations.
// The codec configuration depends on incoming video frame size.
if (last_frame_info_) {
ReconfigureEncoder();
} else if (settings_.internal_source) {
last_frame_info_ =
rtc::Optional<VideoFrameInfo>(VideoFrameInfo(176, 144, false));
ReconfigureEncoder();
}
The ReconfigureEncoder function of the VideoStreamEncoder class:
void VideoStreamEncoder::ReconfigureEncoder() {
bool success = video_sender_.RegisterSendCodec(
&codec, number_of_cores_,
static_cast<uint32_t>(max_data_payload_length_)) == VCM_OK;
The RegisterSendCodec function of the VideoSender class:
// Register the send codec to be used.
int32_t VideoSender::RegisterSendCodec(const VideoCodec* sendCodec,
uint32_t numberOfCores,
uint32_t maxPayloadSize) {
bool ret =
_codecDataBase.SetSendCodec(sendCodec, numberOfCores, maxPayloadSize);
// Update encoder regardless of result to make sure that we're not holding on
// to a deleted instance.
_encoder = _codecDataBase.GetEncoder();
// Cache the current codec here so they can be fetched from this thread
// without requiring the _sendCritSect lock.
current_codec_ = *sendCodec;
The SetSendCodec function of the VCMCodecDataBase class. This function:
1. Creates ptr_encoder_ of type VCMGenericEncoder, passing in the previously created encoder and the callback that receives the encoded data.
2. Calls the encoder's initialization function.
// Assuming only one registered encoder - since only one used, no need for more.
bool VCMCodecDataBase::SetSendCodec(const VideoCodec* send_codec,
int number_of_cores,
size_t max_payload_size) {
ptr_encoder_.reset(new VCMGenericEncoder(
external_encoder_, encoded_frame_callback_, internal_source_));
encoded_frame_callback_->SetInternalSource(internal_source_);
if (ptr_encoder_->InitEncode(&send_codec_, number_of_cores_,
max_payload_size_) < 0) {
RTC_LOG(LS_ERROR) << "Failed to initialize video encoder.";
DeleteEncoder();
return false;
The InitEncode function of the VCMGenericEncoder class:
int32_t VCMGenericEncoder::InitEncode(const VideoCodec* settings,
int32_t number_of_cores,
size_t max_payload_size) {
//The real encoder initialization.
if (encoder_->InitEncode(settings, number_of_cores, max_payload_size) != 0) {
RTC_LOG(LS_ERROR) << "Failed to initialize the encoder associated with "
"payload name: "
<< settings->plName;
return -1;
}
vcm_encoded_frame_callback_->Reset();
//Register the callback with the encoder; it is invoked once a frame has been encoded.
encoder_->RegisterEncodeCompleteCallback(vcm_encoded_frame_callback_);
Encoder initialization is now complete. Next we return to the RecreateWebRtcStream function of the WebRtcVideoChannel::WebRtcVideoSendStream class to see how capture connects to encoding.
The RecreateWebRtcStream function of the WebRtcVideoChannel::WebRtcVideoSendStream class:
void WebRtcVideoChannel::WebRtcVideoSendStream::RecreateWebRtcStream() {
...
webrtc::VideoSendStream::Config config = parameters_.config.Copy();
//The actual type of the created stream_ is VideoSendStream.
stream_ = call_->CreateVideoSendStream(std::move(config),
parameters_.encoder_config.Copy());
parameters_.encoder_config.encoder_specific_settings = NULL;
//SetSource sets the video source; it eventually calls VideoTrack's
//AddOrUpdateSink to register a sink object, and that sink is a
//VideoStreamEncoder. When frames arrive they are handed to the
//VideoStreamEncoder and finally to the encoder.
if (source_) {
stream_->SetSource(this, GetDegradationPreference());
}
...
The SetSource function of the VideoSendStream class:
void VideoSendStream::SetSource(
rtc::VideoSourceInterface<webrtc::VideoFrame>* source,
const DegradationPreference& degradation_preference) {
RTC_DCHECK_RUN_ON(&thread_checker_);
video_stream_encoder_->SetSource(source, degradation_preference);
}
The SetSource function of the VideoStreamEncoder class:
void VideoStreamEncoder::SetSource(
rtc::VideoSourceInterface<VideoFrame>* source,
const VideoSendStream::DegradationPreference& degradation_preference) {
source_proxy_->SetSource(source, degradation_preference);
The SetSource function of the VideoStreamEncoder::VideoSourceProxy class:
void VideoStreamEncoder::VideoSourceProxy::SetSource(
rtc::VideoSourceInterface<VideoFrame>* source,
const VideoSendStream::DegradationPreference& degradation_preference) {
// Called on libjingle's worker thread.
RTC_DCHECK_CALLED_SEQUENTIALLY(&main_checker_);
rtc::VideoSourceInterface<VideoFrame>* old_source = nullptr;
rtc::VideoSinkWants wants;
{
rtc::CritScope lock(&crit_);
degradation_preference_ = degradation_preference;
old_source = source_;
//The type of source here is WebRtcVideoChannel::WebRtcVideoSendStream.
source_ = source;
wants = GetActiveSinkWantsInternal();
}
if (old_source != source && old_source != nullptr) {
old_source->RemoveSink(video_stream_encoder_);
}
if (!source) {
return;
}
source->AddOrUpdateSink(video_stream_encoder_, wants);
}
The AddOrUpdateSink function of the WebRtcVideoChannel::WebRtcVideoSendStream class:
void WebRtcVideoChannel::WebRtcVideoSendStream::AddOrUpdateSink(
rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
const rtc::VideoSinkWants& wants) {
//source_ here is actually the upper-layer track; its real type is VideoTrack.
source_->AddOrUpdateSink(encoder_sink_, wants);
The AddOrUpdateSink function of the VideoTrack class:
void VideoTrack::AddOrUpdateSink(
rtc::VideoSinkInterface<VideoFrame>* sink,
const rtc::VideoSinkWants& wants) {
RTC_DCHECK(worker_thread_->IsCurrent());
VideoSourceBase::AddOrUpdateSink(sink, wants);
rtc::VideoSinkWants modified_wants = wants;
modified_wants.black_frames = !enabled();
//The real type of video_source_ is AndroidVideoTrackSource; it is initialized in the VideoTrack constructor.
video_source_->AddOrUpdateSink(sink, modified_wants);
}
AndroidVideoTrackSource inherits from AdaptedVideoTrackSource, so the AddOrUpdateSink executed here is the one in the AdaptedVideoTrackSource class:
void AdaptedVideoTrackSource::AddOrUpdateSink(
rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
const rtc::VideoSinkWants& wants) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
//The real type of sink is VideoStreamEncoder. This is the link to frames arriving from the capture module.
broadcaster_.AddOrUpdateSink(sink, wants);
OnSinkWantsChanged(broadcaster_.wants());
}
void VideoBroadcaster::AddOrUpdateSink(
VideoSinkInterface<webrtc::VideoFrame>* sink,
const VideoSinkWants& wants) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTC_DCHECK(sink != nullptr);
rtc::CritScope cs(&sinks_and_wants_lock_);
//The subclass calls the same-named function in its base class.
VideoSourceBase::AddOrUpdateSink(sink, wants);
UpdateWants();
We have now seen how capture and encoding are connected. Next we look at how the encoder handles the data when a frame arrives.
Starting encoding
From the capture-to-encoding hookup we know that when a frame arrives, it enters the OnFrame function of VideoStreamEncoder:
void VideoStreamEncoder::OnFrame(const VideoFrame& video_frame) {
encoder_queue_.PostTask(std::unique_ptr<rtc::QueuedTask>(
new EncodeTask(incoming_frame, this, rtc::TimeMicros(), log_stats)));
bool VideoStreamEncoder::EncodeTask::Run() override {
video_stream_encoder_->EncodeVideoFrame(frame_, time_when_posted_us_);
The EncodeVideoFrame function of the VideoStreamEncoder class:
void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame,
int64_t time_when_posted_us) {
static int captureFrameCount = 0;
static double nextCaptureStatisticsTime = -1;
static double UNIT_TIME_INTERVAL = 1000;
static int capturePreprocessingFrameCount = 0;
static double nextCapturePreprocessingStatisticsTime = -1;
captureFrameCount++;
long currentTime = clock_->TimeInMicroseconds()/rtc::kNumMicrosecsPerMillisec;
if(nextCaptureStatisticsTime == -1) {
nextCaptureStatisticsTime = currentTime + UNIT_TIME_INTERVAL;
}
if(currentTime > nextCaptureStatisticsTime) {
RTC_LOG(LS_INFO) << "statistics VideoStreamEncoder capture frame count:" << captureFrameCount;
nextCaptureStatisticsTime = currentTime + UNIT_TIME_INTERVAL;
captureFrameCount = 0;
}
RTC_DCHECK_RUN_ON(&encoder_queue_);
if (pre_encode_callback_)
pre_encode_callback_->OnFrame(video_frame);
if (!last_frame_info_ || video_frame.width() != last_frame_info_->width ||
video_frame.height() != last_frame_info_->height ||
video_frame.is_texture() != last_frame_info_->is_texture) {
pending_encoder_reconfiguration_ = true;
last_frame_info_ = rtc::Optional<VideoFrameInfo>(VideoFrameInfo(
video_frame.width(), video_frame.height(), video_frame.is_texture()));
RTC_LOG(LS_INFO) << "Video frame parameters changed: dimensions="
<< last_frame_info_->width << "x"
<< last_frame_info_->height
<< ", texture=" << last_frame_info_->is_texture << ".";
}
if (initial_rampup_ < kMaxInitialFramedrop &&
video_frame.size() >
MaximumFrameSizeForBitrate(encoder_start_bitrate_bps_ / 1000)) {
RTC_LOG(LS_INFO) << "Dropping frame. Too large for target bitrate.";
AdaptDown(kQuality);
++initial_rampup_;
return;
}
initial_rampup_ = kMaxInitialFramedrop;
int64_t now_ms = clock_->TimeInMilliseconds();
if (pending_encoder_reconfiguration_) {
ReconfigureEncoder();
last_parameters_update_ms_.emplace(now_ms);
} else if (!last_parameters_update_ms_ ||
now_ms - *last_parameters_update_ms_ >=
vcm::VCMProcessTimer::kDefaultProcessIntervalMs) {
video_sender_.UpdateChannelParemeters(rate_allocator_.get(),
bitrate_observer_);
last_parameters_update_ms_.emplace(now_ms);
}
if (EncoderPaused()) {
TraceFrameDropStart();
return;
}
TraceFrameDropEnd();
VideoFrame out_frame(video_frame);
// Crop frame if needed.
if (crop_width_ > 0 || crop_height_ > 0) {
int cropped_width = video_frame.width() - crop_width_;
int cropped_height = video_frame.height() - crop_height_;
rtc::scoped_refptr<I420Buffer> cropped_buffer =
I420Buffer::Create(cropped_width, cropped_height);
// TODO(ilnik): Remove scaling if cropping is too big, as it should never
// happen after SinkWants signaled correctly from ReconfigureEncoder.
if (crop_width_ < 4 && crop_height_ < 4) {
cropped_buffer->CropAndScaleFrom(
*video_frame.video_frame_buffer()->ToI420(), crop_width_ / 2,
crop_height_ / 2, cropped_width, cropped_height);
} else {
cropped_buffer->ScaleFrom(
*video_frame.video_frame_buffer()->ToI420().get());
}
out_frame =
VideoFrame(cropped_buffer, video_frame.timestamp(),
video_frame.render_time_ms(), video_frame.rotation());
out_frame.set_ntp_time_ms(video_frame.ntp_time_ms());
}
TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", video_frame.render_time_ms(),
"Encode");
overuse_detector_->FrameCaptured(out_frame, time_when_posted_us);
capturePreprocessingFrameCount++;
long currentPreprocessingTime = clock_->TimeInMicroseconds()/rtc::kNumMicrosecsPerMillisec;
if(nextCapturePreprocessingStatisticsTime == -1) {
nextCapturePreprocessingStatisticsTime = currentPreprocessingTime + UNIT_TIME_INTERVAL;
}
if(currentPreprocessingTime > nextCapturePreprocessingStatisticsTime) {
RTC_LOG(LS_INFO) << "statistics VideoStreamEncoder preprocessing capture frame count:"
<< capturePreprocessingFrameCount;
nextCapturePreprocessingStatisticsTime = currentPreprocessingTime + UNIT_TIME_INTERVAL;
capturePreprocessingFrameCount = 0;
}
//Crop and scale first if necessary, then call VideoSender's AddVideoFrame.
//That function does a lot of work and deserves careful study.
video_sender_.AddVideoFrame(out_frame, nullptr);
}
The AddVideoFrame function of the VideoSender class:
// Add one raw video frame to the encoder, blocking.
int32_t VideoSender::AddVideoFrame(const VideoFrame& videoFrame,
const CodecSpecificInfo* codecSpecificInfo) {
int32_t ret =
_encoder->Encode(converted_frame, codecSpecificInfo, next_frame_types);
The Encode function of the VCMGenericEncoder class:
int32_t VCMGenericEncoder::Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific,
const std::vector<FrameType>& frame_types) {
//After passing through several wrapper classes, this ends up in the encode
//function of HardwareVideoEncoder.java, which performs the actual encoding.
//Once a frame is encoded, the callback passed in earlier is invoked and the
//encoded image is handed upward for RTP packetization and sending.
return encoder_->Encode(frame, codec_specific, &frame_types);
At this point the overall WebRTC video capture and encoding flow is essentially complete. The finer details deserve deeper study, and interested readers can dig further. The flow described here may be off in places, but the general direction is right. The author was too lazy to draw a sequence diagram; anyone interested is welcome to draw one themselves. Also, if anyone knows how to print a stack trace in the Android build of WebRTC, please let me know; I tried many approaches and never managed it.
Finally, a note on motivation: reading WebRTC's capture and encoding code enables detail-level optimizations, strategy choices, and even performance gains on specific platforms. That is why this article was written, both as a review reference for myself and for mutual learning.
Based on this understanding of the capture and encoding flow, plus study of some of the details, I optimized video quality on the corresponding platform. See the next article, 《webrtc之Android視頻質(zhì)量提升》.