顯示框架之SurfaceFlinger GPU合成

對于不支持device合成的layer,SurfaceFlinger會采用GPU來合成,然后與device合成的layer在hwc進(jìn)行同步再送給屏幕。
在SurfaceFlinger啟動的時候就已經(jīng)搭建好了EGL環(huán)境,具體如下:

文件:frameworks/native/services/surfaceflinger/SurfaceFlinger.cpp

void SurfaceFlinger::init() {
    ALOGI(  "SurfaceFlinger's main thread ready to run. "
            "Initializing graphics H/W...");
    Mutex::Autolock _l(mStateLock);

    // Get a RenderEngine for the given display / config (can't fail)
    // TODO(b/77156734): We need to stop casting and use HAL types when possible.
    // Sending maxFrameBufferAcquiredBuffers as the cache size is tightly tuned to single-display.
    // 創(chuàng)建RenderEngine對象
    mCompositionEngine->setRenderEngine(renderengine::RenderEngine::create(
            renderengine::RenderEngineCreationArgs::Builder()
                .setPixelFormat(static_cast<int32_t>(defaultCompositionPixelFormat))
                .setImageCacheSize(maxFrameBufferAcquiredBuffers)
                .setUseColorManagerment(useColorManagement)
                .setEnableProtectedContext(enable_protected_contents(false))
                .setPrecacheToneMapperShaderOnly(false)
                .setSupportsBackgroundBlur(mSupportsBlur)
                .setContextPriority(useContextPriority
                        ? renderengine::RenderEngine::ContextPriority::HIGH
                        : renderengine::RenderEngine::ContextPriority::MEDIUM)
                .build()));

文件:frameworks/native/libs/renderengine/RenderEngine.cpp

std::unique_ptr<impl::RenderEngine> RenderEngine::create(const RenderEngineCreationArgs& args) {
    char prop[PROPERTY_VALUE_MAX];
    // If PROPERTY_DEBUG_RENDERENGINE_BACKEND is unset, default to the "gles" backend.
    property_get(PROPERTY_DEBUG_RENDERENGINE_BACKEND, prop, "gles");
    if (strcmp(prop, "gles") == 0) {
        ALOGD("RenderEngine GLES Backend");
        // Create the GLESRenderEngine instance.
        return renderengine::gl::GLESRenderEngine::create(args);
    }
    // Unknown backend names also fall back to the GLES implementation.
    ALOGE("UNKNOWN BackendType: %s, create GLES RenderEngine.", prop);
    return renderengine::gl::GLESRenderEngine::create(args);
}

文件:frameworks/native/libs/renderengine/gl/GLESRenderEngine.cpp

std::unique_ptr<GLESRenderEngine> GLESRenderEngine::create(const RenderEngineCreationArgs& args) {
    // initialize EGL for the default display
    // Obtain the default EGLDisplay.
    EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    if (!eglInitialize(display, nullptr, nullptr)) {
        LOG_ALWAYS_FATAL("failed to initialize EGL");
    }
    // Query the EGL version string.
    const auto eglVersion = eglQueryStringImplementationANDROID(display, EGL_VERSION);
    if (!eglVersion) {
        checkGlError(__FUNCTION__, __LINE__);
        LOG_ALWAYS_FATAL("eglQueryStringImplementationANDROID(EGL_VERSION) failed");
    }
    // Query which extensions EGL supports.
    const auto eglExtensions = eglQueryStringImplementationANDROID(display, EGL_EXTENSIONS);
    if (!eglExtensions) {
        checkGlError(__FUNCTION__, __LINE__);
        LOG_ALWAYS_FATAL("eglQueryStringImplementationANDROID(EGL_EXTENSIONS) failed");
    }

    // Record the supported extensions as feature flags (in practice the checked
    // features are all available on current devices).
    GLExtensions& extensions = GLExtensions::getInstance();
    extensions.initWithEGLStrings(eglVersion, eglExtensions);

    // The code assumes that ES2 or later is available if this extension is
    // supported.
    EGLConfig config = EGL_NO_CONFIG;
    if (!extensions.hasNoConfigContext()) {
        config = chooseEglConfig(display, args.pixelFormat, /*logConfig*/ true);
    }

    bool useContextPriority =
            extensions.hasContextPriority() && args.contextPriority == ContextPriority::HIGH;
    EGLContext protectedContext = EGL_NO_CONTEXT;
    if (args.enableProtectedContext && extensions.hasProtectedContent()) {
        protectedContext = createEglContext(display, config, nullptr, useContextPriority,
                                            Protection::PROTECTED);
        ALOGE_IF(protectedContext == EGL_NO_CONTEXT, "Can't create protected context");
    }
    // Create the unprotected EGLContext.
    EGLContext ctxt = createEglContext(display, config, protectedContext, useContextPriority,
                                       Protection::UNPROTECTED);
    LOG_ALWAYS_FATAL_IF(ctxt == EGL_NO_CONTEXT, "EGLContext creation failed");

    EGLSurface dummy = EGL_NO_SURFACE;
    // When surfaceless contexts are supported, this branch is skipped.
    if (!extensions.hasSurfacelessContext()) {
        dummy = createDummyEglPbufferSurface(display, config, args.pixelFormat,
                                             Protection::UNPROTECTED);
        LOG_ALWAYS_FATAL_IF(dummy == EGL_NO_SURFACE, "can't create dummy pbuffer");
    }
    // eglMakeCurrent binds the EGLDisplay/EGLContext to the calling thread.
    EGLBoolean success = eglMakeCurrent(display, dummy, dummy, ctxt);
    LOG_ALWAYS_FATAL_IF(!success, "can't make dummy pbuffer current");
    ...
    std::unique_ptr<GLESRenderEngine> engine;
    switch (version) {
        case GLES_VERSION_1_0:
        case GLES_VERSION_1_1:
            LOG_ALWAYS_FATAL("SurfaceFlinger requires OpenGL ES 2.0 minimum to run.");
            break;
        case GLES_VERSION_2_0:
        case GLES_VERSION_3_0:
            // Construct the GLESRenderEngine with everything set up above.
            engine = std::make_unique<GLESRenderEngine>(args, display, config, ctxt, dummy,
                                                        protectedContext, protectedDummy);
            break;
    }
...
}

GLESRenderEngine::GLESRenderEngine(const RenderEngineCreationArgs& args, EGLDisplay display,
                                   EGLConfig config, EGLContext ctxt, EGLSurface dummy,
                                   EGLContext protectedContext, EGLSurface protectedDummy)
      : renderengine::impl::RenderEngine(args),
        mEGLDisplay(display),
        mEGLConfig(config),
        mEGLContext(ctxt),
        mDummySurface(dummy),
        mProtectedEGLContext(protectedContext),
        mProtectedDummySurface(protectedDummy),
        mVpWidth(0),
        mVpHeight(0),
        mFramebufferImageCacheSize(args.imageCacheSize),
        mUseColorManagement(args.useColorManagement) {
    // Query the maximum supported texture size and viewport dimensions.
    glGetIntegerv(GL_MAX_TEXTURE_SIZE, &mMaxTextureSize);
    glGetIntegerv(GL_MAX_VIEWPORT_DIMS, mMaxViewportDims);
    // Pixel rows are packed/unpacked on 4-byte boundaries.
    glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
    glPixelStorei(GL_PACK_ALIGNMENT, 4);
    ...
    // Color-space setup; relevant when wide color management is enabled.
    if (mUseColorManagement) {
        const ColorSpace srgb(ColorSpace::sRGB());
        const ColorSpace displayP3(ColorSpace::DisplayP3());
        const ColorSpace bt2020(ColorSpace::BT2020());

        // no chromatic adaptation needed since all color spaces use D65 for their white points.
        mSrgbToXyz = mat4(srgb.getRGBtoXYZ());
        mDisplayP3ToXyz = mat4(displayP3.getRGBtoXYZ());
        mBt2020ToXyz = mat4(bt2020.getRGBtoXYZ());
        mXyzToSrgb = mat4(srgb.getXYZtoRGB());
        mXyzToDisplayP3 = mat4(displayP3.getXYZtoRGB());
        mXyzToBt2020 = mat4(bt2020.getXYZtoRGB());

        // Compute sRGB to Display P3 and BT2020 transform matrix.
        // NOTE: For now, we are limiting output wide color space support to
        // Display-P3 and BT2020 only.
        mSrgbToDisplayP3 = mXyzToDisplayP3 * mSrgbToXyz;
        mSrgbToBt2020 = mXyzToBt2020 * mSrgbToXyz;

        // Compute Display P3 to sRGB and BT2020 transform matrix.
        mDisplayP3ToSrgb = mXyzToSrgb * mDisplayP3ToXyz;
        mDisplayP3ToBt2020 = mXyzToBt2020 * mDisplayP3ToXyz;

        // Compute BT2020 to sRGB and Display P3 transform matrix
        mBt2020ToSrgb = mXyzToSrgb * mBt2020ToXyz;
        mBt2020ToDisplayP3 = mXyzToDisplayP3 * mBt2020ToXyz;
    }
    ...
    // Used by layers with background blur.
    if (args.supportsBackgroundBlur) {
        mBlurFilter = new BlurFilter(*this);
        checkErrors("BlurFilter creation");
    }
    // Start the ImageManager thread; it manages the EGLImages for input buffers.
    mImageManager = std::make_unique<ImageManager>(this);
    mImageManager->initThread();
    // Create the GLFramebuffer used for the composition output.
    mDrawingBuffer = createFramebuffer();
    ...
}
    
文件:frameworks/native/libs/renderengine/gl/GLFramebuffer.cpp

// Generates one texture ID (mTextureName) and one framebuffer ID (mFramebufferName).
GLFramebuffer::GLFramebuffer(GLESRenderEngine& engine)
      : mEngine(engine), mEGLDisplay(engine.getEGLDisplay()), mEGLImage(EGL_NO_IMAGE_KHR) {
    glGenTextures(1, &mTextureName);
    glGenFramebuffers(1, &mFramebufferName);
}

啟動之初就搭建好了EGL環(huán)境,并將當(dāng)前線程與context綁定,為后面使用gl命令做好準(zhǔn)備,然后創(chuàng)建了ImageManager 線程,這個線程是管理輸入Buffer的EGLImage,然后創(chuàng)建了GLFrameBuffer,用來操作輸出的buffer。
來看下輸入的紋理,在創(chuàng)建BufferQueueLayer時就已經(jīng)對各個layer創(chuàng)建了紋理ID,為后面走GPU合成做準(zhǔn)備。

文件:frameworks/native/services/surfaceflinger/SurfaceFlinger.cpp

status_t SurfaceFlinger::createBufferQueueLayer(const sp<Client>& client, std::string name,
                                                uint32_t w, uint32_t h, uint32_t flags,
                                                LayerMetadata metadata, PixelFormat& format,
                                                sp<IBinder>* handle,
                                                sp<IGraphicBufferProducer>* gbp,
                                                sp<Layer>* outLayer) {
    ...
    
    // Allocate a texture name for this layer up front, in preparation for GPU composition.
    args.textureName = getNewTexture();
    ...
}

uint32_t SurfaceFlinger::getNewTexture() {
    {
        std::lock_guard lock(mTexturePoolMutex);
        if (!mTexturePool.empty()) {
            uint32_t name = mTexturePool.back();
            mTexturePool.pop_back();
            ATRACE_INT("TexturePoolSize", mTexturePool.size());
            return name;
        }

        // The pool was too small, so increase it for the future
        ++mTexturePoolSize;
    }

    // The pool was empty, so we need to get a new texture name directly using a
    // blocking call to the main thread
    // glGenTextures produces the texture ID; schedule() runs the lambda on the
    // SurfaceFlinger main thread.
    return schedule([this] {
               uint32_t name = 0;
               getRenderEngine().genTextures(1, &name);
               return name;
           })
            .get();
}

回到composeSurfaces這個函數(shù),看下走GPU合成的邏輯。

文件:frameworks/native/services/surfaceflinger/CompositionEngine/src/Output.cpp

std::optional<base::unique_fd> Output::composeSurfaces(
        const Region& debugRegion, const compositionengine::CompositionRefreshArgs& refreshArgs) {

...
    base::unique_fd fd;
    sp<GraphicBuffer> buf;

    // If we aren't doing client composition on this output, but do have a
    // flipClientTarget request for this frame on this output, we still need to
    // dequeue a buffer.
    if (hasClientComposition || outputState.flipClientTarget) {
        // Dequeue a buffer from the render surface; this buffer is the composition output.
        buf = mRenderSurface->dequeueBuffer(&fd);
        if (buf == nullptr) {
            ALOGW("Dequeuing buffer for display [%s] failed, bailing out of "
                  "client composition for this frame",
                  mName.c_str());
            return {};
        }
    }

    base::unique_fd readyFence;
    // No client (GPU) composition needed: return early without drawing.
    if (!hasClientComposition) {
        setExpensiveRenderingExpected(false);
        return readyFence;
    }

    ALOGV("hasClientComposition");

    // Fill in clientCompositionDisplay: the display-level settings.

    renderengine::DisplaySettings clientCompositionDisplay;
    clientCompositionDisplay.physicalDisplay = outputState.destinationClip;
    clientCompositionDisplay.clip = outputState.sourceClip;
    clientCompositionDisplay.orientation = outputState.orientation;
    clientCompositionDisplay.outputDataspace = mDisplayColorProfile->hasWideColorGamut()
       ? outputState.dataspace
            : ui::Dataspace::UNKNOWN;
    clientCompositionDisplay.maxLuminance =
            mDisplayColorProfile->getHdrCapabilities().getDesiredMaxLuminance();
    // Compute the global color transform matrix.
    if (!outputState.usesDeviceComposition && !getSkipColorTransform()) {
        clientCompositionDisplay.colorTransform = outputState.colorTransformMatrix;
    }

    // Note: Updated by generateClientCompositionRequests
    clientCompositionDisplay.clearRegion = Region::INVALID_REGION;

    // Generate the client composition requests for the layers on this output.
    // clientCompositionLayers holds the per-layer settings.
    std::vector<LayerFE::LayerSettings> clientCompositionLayers =
            generateClientCompositionRequests(supportsProtectedContent,
                                              clientCompositionDisplay.clearRegion,
                                              clientCompositionDisplay.outputDataspace);
    appendRegionFlashRequests(debugRegion, clientCompositionLayers);

    // Check if the client composition requests were rendered into the provided graphic buffer. If
    // so, we can reuse the buffer and avoid client composition.

    // If the cache already holds an identical request for this buffer, skip redrawing.
    if (mClientCompositionRequestCache) {
        if (mClientCompositionRequestCache->exists(buf->getId(), clientCompositionDisplay,
                                                   clientCompositionLayers)) {
            outputCompositionState.reusedClientComposition = true;
            setExpensiveRenderingExpected(false);
            return readyFence;
        }
        mClientCompositionRequestCache->add(buf->getId(), clientCompositionDisplay,
                                            clientCompositionLayers);
    }
    // We boost GPU frequency here because there will be color spaces conversion
    // or complex GPU shaders and it's expensive. We boost the GPU frequency so that
    // GPU composition can finish in time. We must reset GPU frequency afterwards,
    // because high frequency consumes extra battery.

    // Boost the GPU for blurred layers or expensive color-space conversion.
    const bool expensiveBlurs =
            refreshArgs.blursAreExpensive && mLayerRequestingBackgroundBlur != nullptr;
    const bool expensiveRenderingExpected =
            clientCompositionDisplay.outputDataspace == ui::Dataspace::DISPLAY_P3 || expensiveBlurs;
    if (expensiveRenderingExpected) {
        setExpensiveRenderingExpected(true);
    }

    // Build a vector of raw pointers into clientCompositionLayers; same contents,
    // just the pointer form drawLayers expects.
    std::vector<const renderengine::LayerSettings*> clientCompositionLayerPointers;
    clientCompositionLayerPointers.reserve(clientCompositionLayers.size());
    std::transform(clientCompositionLayers.begin(), clientCompositionLayers.end(),
                   std::back_inserter(clientCompositionLayerPointers),
                   [](LayerFE::LayerSettings& settings) -> renderengine::LayerSettings* {
                       return &settings;
                   });

    const nsecs_t renderEngineStart = systemTime();
    // GPU composition proper happens inside drawLayers.
    status_t status =
            renderEngine.drawLayers(clientCompositionDisplay, clientCompositionLayerPointers,
                                    buf->getNativeBuffer(), /*useFramebufferCache=*/true,
                                    std::move(fd), &readyFence);
   ...
}

輸入的Buffer是通過BufferLayer的prepareClientComposition 函數(shù)設(shè)到RenderEngine里面的。

文件:frameworks/native/services/surfaceflinger/BufferLayer.cpp

std::optional<compositionengine::LayerFE::LayerSettings> BufferLayer::prepareClientComposition(
        compositionengine::LayerFE::ClientCompositionTargetSettings& targetSettings) {
    ATRACE_CALL();

    std::optional<compositionengine::LayerFE::LayerSettings> result =
            Layer::prepareClientComposition(targetSettings);
     ...
    const State& s(getDrawingState());
    // The buffer most recently queued to this layer (the composition input).
    layer.source.buffer.buffer = mBufferInfo.mBuffer;
    layer.source.buffer.isOpaque = isOpaque(s);
    // The acquire fence for that buffer.
    layer.source.buffer.fence = mBufferInfo.mFence;
    // The texture ID generated when the BufferQueueLayer was created.
    layer.source.buffer.textureName = mTextureName;
    ...
}

至此,SurfaceFlinger調(diào)到RenderEngine里面,SurfaceFlinger的display和outputlayer的信息傳到了RenderEngine,這些都是GPU合成需要的信息,然后來看下drawLayers的流程。

文件:frameworks/native/libs/renderengine/gl/GLESRenderEngine.cpp

status_t GLESRenderEngine::drawLayers(const DisplaySettings& display,
                                      const std::vector<const LayerSettings*>& layers,
                                      ANativeWindowBuffer* const buffer,
                                      const bool useFramebufferCache, base::unique_fd&& bufferFence,
                                      base::unique_fd* drawFence) {
    ATRACE_CALL();
    if (layers.empty()) {
        ALOGV("Drawing empty layer stack");
        return NO_ERROR;
    }
     // 要等前一幀的release fence
    if (bufferFence.get() >= 0) {
        // Duplicate the fence for passing to waitFence.
        base::unique_fd bufferFenceDup(dup(bufferFence.get()));
        if (bufferFenceDup < 0 || !waitFence(std::move(bufferFenceDup))) {
            ATRACE_NAME("Waiting before draw");
            sync_wait(bufferFence.get(), -1);
        }
    }

    if (buffer == nullptr) {
        ALOGE("No output buffer provided. Aborting GPU composition.");
        return BAD_VALUE;
    }

    std::unique_ptr<BindNativeBufferAsFramebuffer> fbo;
    ...
    if (blurLayersSize == 0) {
         // 將dequeue出來的buffer綁定到FB上面,作為fbo
        fbo = std::make_unique<BindNativeBufferAsFramebuffer>(*this, buffer, useFramebufferCache);

文件:frameworks/native/libs/renderengine/gl/include/renderengine/RenderEngine.h

// RAII helper: on construction, wraps |buffer| and binds it as |engine|'s
// framebuffer render target; on destruction, clears the native buffer and
// unbinds the framebuffer.
class BindNativeBufferAsFramebuffer {
public:
    BindNativeBufferAsFramebuffer(RenderEngine& engine, ANativeWindowBuffer* buffer,
                                  const bool useFramebufferCache)
          : mEngine(engine), mFramebuffer(mEngine.getFramebufferForDrawing()), mStatus(NO_ERROR) {
        // Wrap the buffer; on success, bind it as the framebuffer target.
        mStatus = mFramebuffer->setNativeWindowBuffer(buffer, mEngine.isProtected(),
                                                      useFramebufferCache)
                ? mEngine.bindFrameBuffer(mFramebuffer)
                : NO_MEMORY;
    }
    ~BindNativeBufferAsFramebuffer() {
        mFramebuffer->setNativeWindowBuffer(nullptr, false, /*arbitrary*/ true);
        mEngine.unbindFrameBuffer(mFramebuffer);
    }
    // NO_ERROR iff the buffer was successfully bound as the framebuffer.
    status_t getStatus() const { return mStatus; }

private:
    RenderEngine& mEngine;
    Framebuffer* mFramebuffer;
    status_t mStatus;
};

文件: frameworks/native/libs/renderengine/gl/GLFramebuffer.cpp
bool GLFramebuffer::setNativeWindowBuffer(ANativeWindowBuffer* nativeBuffer, bool isProtected,
                                          const bool useFramebufferCache) {
    ATRACE_CALL();
    // Release the previous EGLImage unless it is owned by the framebuffer cache.
    if (mEGLImage != EGL_NO_IMAGE_KHR) {
        if (!usingFramebufferCache) {
            eglDestroyImageKHR(mEGLDisplay, mEGLImage);
            DEBUG_EGL_IMAGE_TRACKER_DESTROY();
        }
        mEGLImage = EGL_NO_IMAGE_KHR;
        mBufferWidth = 0;
        mBufferHeight = 0;
    }

    if (nativeBuffer) {
        // Wrap the new buffer in an EGLImage (or fetch it from the cache).
        mEGLImage = mEngine.createFramebufferImageIfNeeded(nativeBuffer, isProtected,
                                                           useFramebufferCache);
        if (mEGLImage == EGL_NO_IMAGE_KHR) {
            return false;
        }
        usingFramebufferCache = useFramebufferCache;
        mBufferWidth = nativeBuffer->width;
        mBufferHeight = nativeBuffer->height;
    }
    return true;
}

文件:frameworks/native/libs/renderengine/gl/GLESRenderEngine.cpp

GLImageKHR GLESRenderEngine::createFramebufferImageIfNeeded(ANativeWindowBuffer* nativeBuffer,
                                                             bool isProtected,
                                                             bool useFramebufferCache) {
    // buffer類型轉(zhuǎn)換,將ANativeWindowBuffer 轉(zhuǎn)換成 GraphicsBuffer
    sp<GraphicBuffer> graphicBuffer = GraphicBuffer::from(nativeBuffer);
      //使用cache,如果有一樣的image,就直接返回
     if (useFramebufferCache) {
        std::lock_guard<std::mutex> lock(mFramebufferImageCacheMutex);
        for (const auto& image : mFramebufferImageCache) {
            if (image.first == graphicBuffer->getId()) {
                return image.second;
            }
        }
    }
    EGLint attributes[] = {
            isProtected ? EGL_PROTECTED_CONTENT_EXT : EGL_NONE,
            isProtected ? EGL_TRUE : EGL_NONE,
            EGL_NONE,
    };
    // 將dequeue出來的buffer作為參數(shù)創(chuàng)建 EGLImage
    EGLImageKHR image = eglCreateImageKHR(mEGLDisplay, EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID,
                                          nativeBuffer, attributes);
    if (useFramebufferCache) {
        if (image != EGL_NO_IMAGE_KHR) {
            std::lock_guard<std::mutex> lock(mFramebufferImageCacheMutex);
            if (mFramebufferImageCache.size() >= mFramebufferImageCacheSize) {
                EGLImageKHR expired = mFramebufferImageCache.front().second;
                mFramebufferImageCache.pop_front();
                eglDestroyImageKHR(mEGLDisplay, expired);
                DEBUG_EGL_IMAGE_TRACKER_DESTROY();
            }
             // 把image放到mFramebufferImageCache 里面
            mFramebufferImageCache.push_back({graphicBuffer->getId(), image});
        }
    }

    if (image != EGL_NO_IMAGE_KHR) {
        DEBUG_EGL_IMAGE_TRACKER_CREATE();
    }
    return image;
}

status_t GLESRenderEngine::bindFrameBuffer(Framebuffer* framebuffer) {
    ATRACE_CALL();                
    GLFramebuffer* glFramebuffer = static_cast<GLFramebuffer*>(framebuffer);
    // The EGLImage created in the previous step.
    EGLImageKHR eglImage = glFramebuffer->getEGLImage();
    // Texture ID and framebuffer ID generated when the RenderEngine was created.
    uint32_t textureName = glFramebuffer->getTextureName();
    uint32_t framebufferName = glFramebuffer->getFramebufferName();

    // Bind the texture and turn our EGLImage into a texture
    // Subsequent texture operations act on this binding.
    glBindTexture(GL_TEXTURE_2D, textureName);
    // Back the bound 2D texture with the EGLImage contents.
    glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, (GLeglImageOES)eglImage); 

    // Bind the Framebuffer to render into
    glBindFramebuffer(GL_FRAMEBUFFER, framebufferName);
    // Attach the texture as the color attachment so rendering goes to the framebuffer.
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureName, 0);

    uint32_t glStatus = glCheckFramebufferStatus(GL_FRAMEBUFFER);
    ALOGE_IF(glStatus != GL_FRAMEBUFFER_COMPLETE_OES, "glCheckFramebufferStatusOES error %d",
             glStatus);

    return glStatus == GL_FRAMEBUFFER_COMPLETE_OES ? NO_ERROR : BAD_VALUE;
}

首先將dequeue出來的buffer通過eglCreateImageKHR做成image,然后通過glEGLImageTargetTexture2DOES根據(jù)image創(chuàng)建一個2D的紋理,再通過glFramebufferTexture2D把紋理附著在幀緩存上面。setViewportAndProjection 設(shè)置視圖和投影矩陣。

文件:frameworks/native/libs/renderengine/gl/GLESRenderEngine.cpp

status_t GLESRenderEngine::drawLayers(const DisplaySettings& display,
                                      const std::vector<const LayerSettings*>& layers,
                                      ANativeWindowBuffer* const buffer,
                                      const bool useFramebufferCache, base::unique_fd&& bufferFence,
                                      base::unique_fd* drawFence) {
         ...
         // 設(shè)置頂點(diǎn)和紋理坐標(biāo)的size
         Mesh mesh = Mesh::Builder()
                        .setPrimitive(Mesh::TRIANGLE_FAN)
                        .setVertices(4 /* count */, 2 /* size */)
                        .setTexCoords(2 /* size */)
                        .setCropCoords(2 /* size */)
                        .build();
         for (auto const layer : layers) {
          //遍歷outputlayer
             ...
          //獲取layer的大小
        const FloatRect bounds = layer->geometry.boundaries;
        Mesh::VertexArray<vec2> position(mesh.getPositionArray<vec2>());
        // 設(shè)置頂點(diǎn)的坐標(biāo),逆時針方向
        position[0] = vec2(bounds.left, bounds.top);
        position[1] = vec2(bounds.left, bounds.bottom);
        position[2] = vec2(bounds.right, bounds.bottom);
        position[3] = vec2(bounds.right, bounds.top);
         //設(shè)置crop的坐標(biāo)
        setupLayerCropping(*layer, mesh);
        // 設(shè)置顏色矩陣
        setColorTransform(display.colorTransform * layer->colorTransform);
        ...
        // Buffer相關(guān)設(shè)置
        if (layer->source.buffer.buffer != nullptr) {
            disableTexture = false;
            isOpaque = layer->source.buffer.isOpaque;
             // layer的buffer,理解為輸入的buffer
            sp<GraphicBuffer> gBuf = layer->source.buffer.buffer;
            // textureName是創(chuàng)建BufferQueuelayer時生成的,用來標(biāo)識這個layer,
            // fence是acquire fence
            bindExternalTextureBuffer(layer->source.buffer.textureName, gBuf,
                                      layer->source.buffer.fence);

            ...
            // 設(shè)置紋理坐標(biāo),也是逆時針
            renderengine::Mesh::VertexArray<vec2> texCoords(mesh.getTexCoordArray<vec2>());
            texCoords[0] = vec2(0.0, 0.0);
            texCoords[1] = vec2(0.0, 1.0);
            texCoords[2] = vec2(1.0, 1.0);
            texCoords[3] = vec2(1.0, 0.0);
           // 設(shè)置紋理的參數(shù),glTexParameteri
            setupLayerTexturing(texture);
        }


status_t GLESRenderEngine::bindExternalTextureBuffer(uint32_t texName,
                                                     const sp<GraphicBuffer>& buffer,
                                                     const sp<Fence>& bufferFence) {
    if (buffer == nullptr) {
        return BAD_VALUE;
    }

    ATRACE_CALL();

    bool found = false;
    {
        // Check whether this buffer already has an EGLImage in mImageCache.
        std::lock_guard<std::mutex> lock(mRenderingMutex);
        auto cachedImage = mImageCache.find(buffer->getId());
        found = (cachedImage != mImageCache.end());
    }

    // If we couldn't find the image in the cache at this time, then either
    // SurfaceFlinger messed up registering the buffer ahead of time or we got
    // backed up creating other EGLImages.
    if (!found) {
        // Cache miss: create the input EGLImage on the ImageManager thread,
        // which is woken via its notify mechanism.
        status_t cacheResult = mImageManager->cache(buffer);
        if (cacheResult != NO_ERROR) {
            return cacheResult;
        }
    }

    ...
        // Turn the EGLImage into a texture of type GL_TEXTURE_EXTERNAL_OES.
        bindExternalTextureImage(texName, *cachedImage->second);
        mTextureView.insert_or_assign(texName, buffer->getId());
    }
}

void GLESRenderEngine::bindExternalTextureImage(uint32_t texName, const Image& image) {
    ATRACE_CALL();
    const GLImage& glImage = static_cast<const GLImage&>(image);
    const GLenum target = GL_TEXTURE_EXTERNAL_OES;
    // Bind the external texture with ID texName.
    glBindTexture(target, texName);
    if (glImage.getEGLImage() != EGL_NO_IMAGE_KHR) {
        // Source the texture's contents from the EGLImage.
        glEGLImageTargetTexture2DOES(target, static_cast<GLeglImageOES>(glImage.getEGLImage()));
    }
}

至此,將輸入和輸出的Buffer都生成了紋理對應(yīng),以及設(shè)置了紋理的坐標(biāo)和頂點(diǎn)的坐標(biāo),接下來就要使用shader進(jìn)行繪制了。

文件:frameworks/native/libs/renderengine/gl/GLESRenderEngine.cpp

void GLESRenderEngine::drawMesh(const Mesh& mesh) {
    ATRACE_CALL();
    if (mesh.getTexCoordsSize()) {
        // Enable the attribute array so the vertex shader can read per-vertex data.
        glEnableVertexAttribArray(Program::texCoords);
        // Feed the texture coordinates to the vertex shader.
        glVertexAttribPointer(Program::texCoords, mesh.getTexCoordsSize(), GL_FLOAT, GL_FALSE,
                              mesh.getByteStride(), mesh.getTexCoords());
    }
    // Feed the vertex positions to the vertex shader.
    glVertexAttribPointer(Program::position, mesh.getVertexSize(), GL_FLOAT, GL_FALSE,
                          mesh.getByteStride(), mesh.getPositions());
    ...
    // Build (or fetch) the vertex/fragment shader program and upload its
    // attributes and uniform constants.
    ProgramCache::getInstance().useProgram(mInProtectedContext ? mProtectedEGLContext : mEGLContext,
                                           managedState);
    ...
    // Issue the draw call to the GPU.
    glDrawArrays(mesh.getPrimitive(), 0, mesh.getVertexCount());
    ...
}

文件:frameworks/native/libs/renderengine/gl/ProgramCache.cpp

void ProgramCache::useProgram(EGLContext context, const Description& description) {
    // Compute the cache key; different keys produce different shaders.
    Key needs(computeKey(description));    

    // look-up the program in the cache
    auto& cache = mCaches[context];
    auto it = cache.find(needs);
    if (it == cache.end()) {
        // we didn't find our program, so generate one...
        nsecs_t time = systemTime();
        // Cache miss: generate a new program for this key.
        it = cache.emplace(needs, generateProgram(needs)).first;
        time = systemTime() - time;

        ALOGV(">>> generated new program for context %p: needs=%08X, time=%u ms (%zu programs)",
              context, needs.mKey, uint32_t(ns2ms(time)), cache.size());
    }   
    
    // here we have a suitable program for this description
    std::unique_ptr<Program>& program = it->second;
    if (program->isValid()) {
        program->use();
        program->setUniforms(description);
    }
}

std::unique_ptr<Program> ProgramCache::generateProgram(const Key& needs) {
    ATRACE_CALL();

    // Generate the vertex shader source.
    String8 vs = generateVertexShader(needs);

    // Generate the fragment shader source.
    String8 fs = generateFragmentShader(needs);

    // Compile and link both shaders into a Program.
    return std::make_unique<Program>(needs, vs.string(), fs.string());
}

String8 ProgramCache::generateVertexShader(const Key& needs) {
    Formatter vs;
    if (needs.hasTextureCoords()) {
        // "attribute" inputs are set via glVertexAttribPointer; "varying"
        // values are passed on to the fragment shader.
        vs << "attribute vec4 texCoords;"
           << "varying vec2 outTexCoords;";
    }
    ...
    vs << "attribute vec4 position;"
       << "uniform mat4 projection;"
       << "uniform mat4 texture;"
       << "void main(void) {" << indent << "gl_Position = projection * position;";
    if (needs.hasTextureCoords()) {
        vs << "outTexCoords = (texture * texCoords).st;";
    }
    ...
    return vs.getString();
}

String8 ProgramCache::generateFragmentShader(const Key& needs) {
    Formatter fs;
    if (needs.getTextureTarget() == Key::TEXTURE_EXT) {
        fs << "#extension GL_OES_EGL_image_external : require";
    }

    // default precision is required-ish in fragment shaders
    fs << "precision mediump float;";

    if (needs.getTextureTarget() == Key::TEXTURE_EXT) {
        fs << "uniform samplerExternalOES sampler;";
    } else if (needs.getTextureTarget() == Key::TEXTURE_2D) {
        fs << "uniform sampler2D sampler;";
    }

    if (needs.hasTextureCoords()) {
        fs << "varying vec2 outTexCoords;";
    } 
    ...
    fs << "void main(void) {" << indent;
    ...
        if (needs.isTexturing()) {
            // Sample the texture to produce the output pixel color.
            fs << "gl_FragColor = texture2D(sampler, outTexCoords);"
    ...
}
文件: frameworks/native/libs/renderengine/gl/Program.cpp

Program::Program(const ProgramCache::Key& /*needs*/, const char* vertex, const char* fragment)
      : mInitialized(false) {
    // Compile the vertex and fragment shaders.
    GLuint vertexId = buildShader(vertex, GL_VERTEX_SHADER);
    GLuint fragmentId = buildShader(fragment, GL_FRAGMENT_SHADER);
    // Create the program object.
    GLuint programId = glCreateProgram();
    // Attach both shaders to the program.
    glAttachShader(programId, vertexId);
    glAttachShader(programId, fragmentId);
    // Bind the shader attributes to our well-known attribute locations.
    glBindAttribLocation(programId, position, "position");
    glBindAttribLocation(programId, texCoords, "texCoords");
    glBindAttribLocation(programId, cropCoords, "cropCoords");
    glBindAttribLocation(programId, shadowColor, "shadowColor");
    glBindAttribLocation(programId, shadowParams, "shadowParams");
    glLinkProgram(programId);

    GLint status;
    glGetProgramiv(programId, GL_LINK_STATUS, &status);
    ...
        mProgram = programId;
        mVertexShader = vertexId;
        mFragmentShader = fragmentId;
        mInitialized = true;
        // Look up the locations of the uniform variables in the shaders.
        mProjectionMatrixLoc = glGetUniformLocation(programId, "projection");
        mTextureMatrixLoc = glGetUniformLocation(programId, "texture");
        ...
        // set-up the default values for our uniforms
        glUseProgram(programId);
        glUniformMatrix4fv(mProjectionMatrixLoc, 1, GL_FALSE, mat4().asArray());
        glEnableVertexAttribArray(0);
}

// Makes this program the active GL program for subsequent draw calls.
void Program::use() {
    glUseProgram(mProgram);
}

// Pushes the values from `desc` into this program's uniform variables,
// using the uniform locations cached at link time.
// NOTE(review): excerpt — the `...` elides the remaining uniform updates,
// which is why a stray closing brace appears after it.
void Program::setUniforms(const Description& desc) {
    // TODO: we should have a mechanism here to not always reset uniforms that
    // didn't change for this program.
    // Write each uniform value into the shader at its cached location.
    if (mSamplerLoc >= 0) {
        glUniform1i(mSamplerLoc, 0);
        glUniformMatrix4fv(mTextureMatrixLoc, 1, GL_FALSE, desc.texture.getMatrix().asArray());
    }
   ...
       glUniformMatrix4fv(mProjectionMatrixLoc, 1, GL_FALSE, desc.projectionMatrix.asArray());
    }

最后調(diào)用glDrawArrays,使用GPU來繪制??梢妼τ贕PU來說,輸入都是一幅幅紋理,然后在著色器里面控制最后pixel的位置坐標(biāo)和顏色值。
使用GPU繪制往往伴隨著一個acquire fence,下面看一下acquire fence的生成過程。

文件: frameworks/native/libs/renderengine/gl/GLESRenderEngine.cpp

// Flushes all pending GL commands to the GPU and returns an Android native
// fence fd that signals when the GPU has finished executing them. Returns an
// empty (invalid) fd when native fence sync is unsupported or sync creation
// fails — callers fall back to a blocking finish() in that case.
base::unique_fd GLESRenderEngine::flush() {
    ATRACE_CALL();

    // Without the native fence sync extension there is no fd to hand out.
    if (!GLExtensions::getInstance().hasNativeFenceSync()) {
        return base::unique_fd();
    }

    // Insert an EGL sync object into the command stream; it is used to track
    // whether the GPU has completed the work queued before it.
    EGLSyncKHR nativeSync = eglCreateSyncKHR(mEGLDisplay, EGL_SYNC_NATIVE_FENCE_ANDROID, nullptr);
    if (nativeSync == EGL_NO_SYNC_KHR) {
        ALOGW("failed to create EGL native fence sync: %#x", eglGetError());
        return base::unique_fd();
    }

    // native fence fd will not be populated until flush() is done.
    // Hand all buffered GL commands over to the GPU.
    glFlush();

    // Extract the Android fence fd backing the sync object, then drop the
    // EGL-side handle — the dup'd fd alone keeps the fence alive.
    base::unique_fd fence(eglDupNativeFenceFDANDROID(mEGLDisplay, nativeSync));
    eglDestroySyncKHR(mEGLDisplay, nativeSync);
    if (fence == EGL_NO_NATIVE_FENCE_FD_ANDROID) {
        ALOGW("failed to dup EGL native fence sync: %#x", eglGetError());
    }

    // Only trace if we have a valid fence, as current usage falls back to
    // calling finish() if the fence fd is invalid.
    if (CC_UNLIKELY(mTraceGpuCompletion && mFlushTracer) && fence.get() >= 0) {
        mFlushTracer->queueSync(eglCreateSyncKHR(mEGLDisplay, EGL_SYNC_FENCE_KHR, nullptr));
    }

    return fence;
}

到這里,CPU將命令全部給到GPU了,然后GPU自己去draw,CPU繼續(xù)往下運(yùn)行。
回到finishFrame 函數(shù),獲得GPU合成的fence后,會執(zhí)行queueBuffer操作。

文件:frameworks/native/services/surfaceflinger/CompositionEngine/src/Output.cpp

// Finalizes the current frame for this output: performs GPU (client)
// composition if required and queues the resulting buffer for presentation.
// Does nothing when the output is disabled or composition produced no frame.
void Output::finishFrame(const compositionengine::CompositionRefreshArgs& refreshArgs) {
    ATRACE_CALL();
    ALOGV(__FUNCTION__);

    // A disabled output has nothing to present.
    if (!getState().isEnabled) {
        return;
    }

    // Repaint the framebuffer (if needed); on success the optional carries
    // the fence that signals when the composition completes.
    if (auto readyFence = composeSurfaces(Region::INVALID_REGION, refreshArgs)) {
        // swap buffers (presentation)
        mRenderSurface->queueBuffer(std::move(*readyFence));
    }
}

文件:frameworks/native/services/surfaceflinger/CompositionEngine/src/RenderSurface.cpp

// Queues the buffer that GPU composition rendered into back to the native
// window (with its readiness fence), then advances the display surface so the
// HWC consumes it.
// NOTE(review): excerpt — the `...` elides the surrounding condition/scope,
// which is why the braces do not balance here.
void RenderSurface::queueBuffer(base::unique_fd readyFence) {
    auto& state = mDisplay.getState();

         ...
    
        if (mGraphicBuffer == nullptr) {
            ALOGE("No buffer is ready for display [%s]", mDisplay.getName().c_str());
        } else {
            status_t result =
                    // mGraphicBuffer->getNativeBuffer() is the output buffer of GPU
                    // composition — the GPU composited the client layers into it.
                    mNativeWindow->queueBuffer(mNativeWindow.get(),
                                               mGraphicBuffer->getNativeBuffer(), dup(readyFence));
            if (result != NO_ERROR) {
                ALOGE("Error when queueing buffer for display [%s]: %d", mDisplay.getName().c_str(),
                      result);
                // We risk blocking on dequeueBuffer if the primary display failed
                // to queue up its buffer, so crash here.
                if (!mDisplay.isVirtual()) {
                    LOG_ALWAYS_FATAL("ANativeWindow::queueBuffer failed with error: %d", result);
                } else {
                    mNativeWindow->cancelBuffer(mNativeWindow.get(),
                                                mGraphicBuffer->getNativeBuffer(), dup(readyFence));
                }
            }

            mGraphicBuffer = nullptr;
        }
    }
    // Consume the just-queued buffer (acquire it and pass it to the HWC).
    status_t result = mDisplaySurface->advanceFrame();
    if (result != NO_ERROR) {
        ALOGE("[%s] failed pushing new frame to HWC: %d", mDisplay.getName().c_str(), result);
    }
}

文件:frameworks/native/services/surfaceflinger/DisplayHardware/FramebufferSurface.cpp

// Latches the next composed framebuffer from the queue (via nextBuffer, which
// also hands it to the HWC), records its dataspace, and logs — but still
// propagates — any latch failure to the caller.
status_t FramebufferSurface::advanceFrame() {
    uint32_t outSlot = 0;
    sp<GraphicBuffer> outBuffer;
    sp<Fence> outFence(Fence::NO_FENCE);
    Dataspace outDataspace = Dataspace::UNKNOWN;

    // Consume the buffer.
    const status_t err = nextBuffer(outSlot, outBuffer, outFence, outDataspace);
    mDataSpace = outDataspace;
    if (err != NO_ERROR) {
        ALOGE("error latching next FramebufferSurface buffer: %s (%d)",
                strerror(-err), err);
    }
    return err;
}

// Acquires the next buffer from the BufferQueue, updates the current/previous
// buffer-slot bookkeeping, and passes the buffer plus its acquire fence to the
// HWC as the client target (the GPU-composited framebuffer).
// NOTE(review): excerpt — the `...` elides error handling after acquire.
status_t FramebufferSurface::nextBuffer(uint32_t& outSlot,
        sp<GraphicBuffer>& outBuffer, sp<Fence>& outFence,
        Dataspace& outDataspace) {
    Mutex::Autolock lock(mMutex);

    BufferItem item;
    // Acquire the next buffer from the queue.
    status_t err = acquireBufferLocked(&item, 0);
    ...
    if (mCurrentBufferSlot != BufferQueue::INVALID_BUFFER_SLOT &&
        item.mSlot != mCurrentBufferSlot) {
        mHasPendingRelease = true;
        mPreviousBufferSlot = mCurrentBufferSlot;
        mPreviousBuffer = mCurrentBuffer;
    }
    // Update the current buffer and fence bookkeeping.
    mCurrentBufferSlot = item.mSlot;
    mCurrentBuffer = mSlots[mCurrentBufferSlot].mGraphicBuffer;
    mCurrentFence = item.mFence;

    outFence = item.mFence;
    mHwcBufferCache.getHwcBuffer(mCurrentBufferSlot, mCurrentBuffer, &outSlot, &outBuffer);
    outDataspace = static_cast<Dataspace>(item.mDataSpace);
    // Hand the GPU output buffer and its acquire fence to the HWC.
    status_t result = mHwc.setClientTarget(mDisplayId, outSlot, outFence, outBuffer, outDataspace);
    if (result != NO_ERROR) {
        ALOGE("error posting framebuffer: %d", result);
        return result;
    }

    return NO_ERROR;
}

GPU合成的Buffer通過setClientTarget 設(shè)給hwc,有GPU合成的layer需要先validate再present,所以還需要再present一次,邏輯在postFramebuffer 里面。

文件:frameworks/native/services/surfaceflinger/CompositionEngine/src/Output.cpp

// Presents the composed frame through the HWC and notifies the render surface
// that presentation completed (which triggers release-fence processing).
// NOTE(review): excerpt — the `...` lines elide surrounding logic.
void Output::postFramebuffer() {
    ATRACE_CALL();
    ALOGV(__FUNCTION__);
   ...
    // Present via the HWC and collect the present/release fences.
    auto frame = presentAndGetFrameFences();

    mRenderSurface->onPresentDisplayCompleted();
    ...
}
   

文件:frameworks/native/services/surfaceflinger/DisplayHardware/HWComposer.cpp
// Presents the display's current frame through the HWC and stores the
// resulting present fence and per-layer release fences for later retrieval.
// NOTE(review): excerpt — the `...` elides validate/accept handling.
status_t HWComposer::presentAndGetReleaseFences(DisplayId displayId) {
    ATRACE_CALL();
    
    RETURN_IF_INVALID_DISPLAY(displayId, BAD_INDEX);
        
    auto& displayData = mDisplayData[displayId];
    auto& hwcDisplay = displayData.hwcDisplay;
    
     ...
    // Execute present (needed when GPU composition occurred); the HWC returns
    // the present fence for this frame.
    auto error = hwcDisplay->present(&displayData.lastPresentFence);
    RETURN_IF_HWC_ERROR_FOR("present", error, displayId, UNKNOWN_ERROR);

    std::unordered_map<HWC2::Layer*, sp<Fence>> releaseFences;
    // Fetch the per-layer release fences back from the HWC.
    error = hwcDisplay->getReleaseFences(&releaseFences);
    RETURN_IF_HWC_ERROR_FOR("getReleaseFences", error, displayId, UNKNOWN_ERROR);

    displayData.releaseFences = std::move(releaseFences);

    return NO_ERROR;
}

文件: frameworks/native/services/surfaceflinger/DisplayHardware/FramebufferSurface.cpp

void FramebufferSurface::onFrameCommitted() {
    if (mHasPendingRelease) {
        sp<Fence> fence = mHwc.getPresentFence(mDisplayId);
        if (fence->isValid()) {
            // 更新BufferSlot的 fence
            status_t result = addReleaseFence(mPreviousBufferSlot,
                    mPreviousBuffer, fence);
            ALOGE_IF(result != NO_ERROR, "onFrameCommitted: failed to add the"
                    " fence: %s (%d)", strerror(-result), result);
        }
        // 釋放之前的Buffer
        status_t result = releaseBufferLocked(mPreviousBufferSlot, mPreviousBuffer);
        ALOGE_IF(result != NO_ERROR, "onFrameCommitted: error releasing buffer:"
                " %s (%d)", strerror(-result), result);
    
        mPreviousBuffer.clear();
        mHasPendingRelease = false;
    }
}

至此GPU合成的layer通過present調(diào)到hwc,hwc再執(zhí)行commit上屏,其中有一些fence同步的代碼,就先不分析了。

?著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請聯(lián)系作者
【社區(qū)內(nèi)容提示】社區(qū)部分內(nèi)容疑似由AI輔助生成,瀏覽時請結(jié)合常識與多方信息審慎甄別。
平臺聲明:文章內(nèi)容(如有圖片或視頻亦包括在內(nèi))由作者上傳并發(fā)布,文章內(nèi)容僅代表作者本人觀點(diǎn),簡書系信息發(fā)布平臺,僅提供信息存儲服務(wù)。

相關(guān)閱讀更多精彩內(nèi)容

友情鏈接更多精彩內(nèi)容