import UIKit
import AVFoundation
/// Receives raw sample buffers produced by the capture pipeline.
/// Conformers perform any per-frame processing (filters, encoding, etc.).
protocol CaptureManagerDelegate: AnyObject {
/// Called on the capture queue for each captured buffer.
/// - Parameters:
///   - sampleBuffer: The raw captured frame.
///   - type: The media type of the buffer (`.video` for frames from the video connection).
func processSampleBuffer(sampleBuffer: CMSampleBuffer, type: AVMediaType)
}
/// Manages an AVFoundation capture session: device discovery, preview,
/// camera switching, and manual controls (ISO, shutter, focus, white balance,
/// exposure bias, zoom, torch). Frames are delivered via `CaptureManagerDelegate`.
class ZGCapture: NSObject {

    weak var delegate: CaptureManagerDelegate?

    // MARK: - Device capability accessors

    /// Minimum sensor ISO supported by the active format.
    var minISO: Float {
        return currentDevice?.activeFormat.minISO ?? 0
    }
    /// Maximum sensor ISO supported by the active format.
    var maxISO: Float {
        return currentDevice?.activeFormat.maxISO ?? 0
    }
    /// Shortest exposure (shutter) duration supported by the active format.
    var minExposureDuration: CMTime {
        return currentDevice?.activeFormat.minExposureDuration ?? .zero
    }
    /// Longest exposure (shutter) duration supported by the active format.
    var maxExposureDuration: CMTime {
        return currentDevice?.activeFormat.maxExposureDuration ?? .zero
    }
    /// Minimum exposure target bias (EV) the device accepts.
    var minExposureTargetBias: Float {
        return currentDevice?.minExposureTargetBias ?? .zero
    }
    /// Maximum exposure target bias (EV) the device accepts.
    var maxExposureTargetBias: Float {
        return currentDevice?.maxExposureTargetBias ?? .zero
    }
    /// Current video zoom factor.
    var zoom: CGFloat {
        return currentDevice?.videoZoomFactor ?? 1
    }
    /// Maximum zoom factor currently available.
    var maxZoom: CGFloat {
        return currentDevice?.maxAvailableVideoZoomFactor ?? 1
    }
    /// Minimum zoom factor currently available.
    var minZoom: CGFloat {
        return currentDevice?.minAvailableVideoZoomFactor ?? 1
    }

    // MARK: - Exposure compensation

    /// Current exposure target bias (EV).
    var exposureTargetBias: Float {
        return currentDevice?.exposureTargetBias ?? .zero
    }

    // Exposure duration captured when a device is initialized; restored by `setAutoMode()`.
    private var autoExposureDuration: CMTime = .zero
    private(set) var videoDevices = [AVCaptureDevice]()
    private(set) var currentDevice: AVCaptureDevice?
    private var captureSession = AVCaptureSession()
    private var captureConnection: AVCaptureConnection?
    private var currentVideoInput: AVCaptureDeviceInput?
    private var videoQueue = DispatchQueue(label: "videoQueue")
    private var previewLayer: AVCaptureVideoPreviewLayer?
    // Index into `videoDevices` of the last-selected back camera, so a
    // front→back switch restores the same lens.
    private var currentBackDeviceIndex = 0

    override init() {
        super.init()
        getVideoDevices()
        initCapture(deviceIndex: 0)
    }

    /// Installs a preview layer for the capture session into `preview`,
    /// replacing any previously installed layer.
    func setPreview(preview: UIView) {
        previewLayer?.removeFromSuperlayer()
        // FIX: build the layer locally instead of force-unwrapping the optional property.
        let layer = AVCaptureVideoPreviewLayer(session: captureSession)
        layer.frame = preview.bounds
        layer.videoGravity = .resizeAspectFill
        preview.layer.insertSublayer(layer, at: 0)
        previewLayer = layer
    }

    /// Starts delivering frames to the delegate.
    func startRecordVideo() {
        startCapture()
    }

    /// Stops delivering frames.
    func stopRecordVideo() {
        stopCapture()
    }

    // Start the session. startRunning() blocks, so it is dispatched off the caller's thread.
    private func startCapture() {
        if captureSession.isRunning {
            captureSession.stopRunning()
        }
        videoQueue.async {
            self.captureSession.startRunning()
        }
    }

    // Stop the session if it is running.
    private func stopCapture() {
        if captureSession.isRunning {
            captureSession.stopRunning()
        }
    }

    // Discover all back-facing cameras available on this device/OS.
    private func getVideoDevices() {
        // Wide-angle, telephoto
        var deviceTypes: [AVCaptureDevice.DeviceType] = [.builtInWideAngleCamera, .builtInTelephotoCamera]
        if #available(iOS 10.2, *) {
            // Dual (wide + telephoto) camera
            deviceTypes.append(.builtInDualCamera)
        }
        if #available(iOS 11.1, *) {
            // TrueDepth combination camera
            deviceTypes.append(.builtInTrueDepthCamera)
        }
        if #available(iOS 13.0, *) {
            // Ultra-wide, ultra-wide + wide, ultra-wide + wide + telephoto
            deviceTypes += [.builtInUltraWideCamera, .builtInDualWideCamera, .builtInTripleCamera]
        }
        let deviceSession = AVCaptureDevice.DiscoverySession(deviceTypes: deviceTypes, mediaType: .video, position: .back)
        videoDevices = deviceSession.devices
    }

    /// Selects (or switches to) a back camera by index into `videoDevices`.
    func initCapture(deviceIndex: Int) {
        guard deviceIndex < videoDevices.count else {
            return
        }
        let videoDevice = videoDevices[deviceIndex]
        currentBackDeviceIndex = deviceIndex
        DispatchQueue.global().async {
            self.initDevice(device: videoDevice)
        }
    }

    /// Switches between front and back cameras.
    func switchPosition(position: AVCaptureDevice.Position) {
        if position == .front,
           let device = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: .video, position: .front).devices.first {
            // Torch belongs to the back camera; turn it off before switching away.
            setTorchMode(mode: .off)
            initDevice(device: device)
        } else {
            initCapture(deviceIndex: currentBackDeviceIndex)
        }
    }

    // Rebuilds the session's input/output around `device`, preserving the
    // running state across the reconfiguration.
    private func initDevice(device: AVCaptureDevice) {
        let isRestoration = captureSession.isRunning
        stopCapture()
        // Build the input for the new device; bail out if it cannot be opened.
        guard let videoInput = try? AVCaptureDeviceInput(device: device) else {
            return
        }
        // Remember the active device and its auto-exposure duration (restored by setAutoMode()).
        currentDevice = device
        autoExposureDuration = device.exposureDuration
        // Configure the video data output.
        let videoOutput = AVCaptureVideoDataOutput()
        videoOutput.setSampleBufferDelegate(self, queue: videoQueue)
        // Drop frames that arrive late rather than queueing them.
        videoOutput.alwaysDiscardsLateVideoFrames = true
        // Output pixel format: 4:2:0 bi-planar (NV12), video range.
        videoOutput.videoSettings = [
            kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange),
        ]
        captureSession.beginConfiguration()
        // Remove the previous input/output.
        if let currentVideoDeviceInput = captureSession.inputs.first as? AVCaptureDeviceInput {
            captureSession.removeInput(currentVideoDeviceInput)
        }
        if let currentVideoOutput = captureSession.outputs.first {
            captureSession.removeOutput(currentVideoOutput)
        }
        // Attach the new input/output.
        if captureSession.canAddInput(videoInput) {
            captureSession.addInput(videoInput)
            currentVideoInput = videoInput
        }
        if captureSession.canAddOutput(videoOutput) {
            captureSession.addOutput(videoOutput)
        }
        captureSession.commitConfiguration()
        captureConnection = videoOutput.connection(with: AVMediaType.video)
        // Default recording orientation.
        setupVideoOrientation(orientation: .portrait)
        if isRestoration {
            startCapture()
        }
    }

    // Maps a UIDeviceOrientation to the corresponding capture orientation.
    // Face-up/face-down/unknown keep the portrait default.
    private func setupVideoOrientation(deviceOrientation: UIDeviceOrientation) {
        var videoOrientation: AVCaptureVideoOrientation = .portrait
        switch deviceOrientation {
        case .portrait:
            videoOrientation = .portrait
        case .portraitUpsideDown:
            videoOrientation = .portraitUpsideDown
        case .landscapeLeft:
            videoOrientation = .landscapeLeft
        case .landscapeRight:
            videoOrientation = .landscapeRight
        default:
            break
        }
        self.setupVideoOrientation(orientation: videoOrientation)
    }

    /// Applies the video orientation to the current connection, if supported.
    private func setupVideoOrientation(orientation: AVCaptureVideoOrientation) {
        if (self.captureConnection?.isVideoOrientationSupported ?? false) {
            captureConnection?.videoOrientation = orientation
        }
    }

    // Resolution/FPS changes are slow; overlapping beginConfiguration calls error
    // out, so requests run on a serial queue with a cancellable work item.
    private let setResolutionAndFpsQueue = DispatchQueue(label: "serialQueue")
    private var workItem: DispatchWorkItem?

    /// Selects a device format matching `width`×`height` that supports `frameRate`,
    /// and locks the frame duration to that rate. Cancels any pending request.
    func setResolutionAndFps(width: Int, height: Int, frameRate: Float64) {
        workItem?.cancel()
        // FIX: keep a local reference so we never force-unwrap the stored optional.
        let item = DispatchWorkItem { [self] in
            guard let captureDevice = currentDevice else { return }
            let size = CGSize(width: width, height: height)
            for vFormat in captureDevice.formats {
                let maxRate = vFormat.videoSupportedFrameRateRanges.first?.maxFrameRate ?? 30
                guard maxRate >= frameRate else { continue }
                let description = vFormat.formatDescription
                let dims = CMVideoFormatDescriptionGetDimensions(description)
                // Match the requested resolution.
                if dims.width == Int32(size.width) && dims.height == Int32(size.height) {
                    // Lock min and max frame duration to the requested rate.
                    captureSession.beginConfiguration()
                    do {
                        try captureDevice.lockForConfiguration()
                        captureDevice.activeFormat = vFormat
                        captureDevice.activeVideoMinFrameDuration = CMTimeMake(value: 1, timescale: Int32(frameRate))
                        captureDevice.activeVideoMaxFrameDuration = CMTimeMake(value: 1, timescale: Int32(frameRate))
                        captureDevice.unlockForConfiguration()
                        captureSession.commitConfiguration()
                        break
                    } catch {
                        captureSession.commitConfiguration()
                    }
                }
            }
            print("設(shè)置分辨率:\(size) 幀率:\(frameRate)")
        }
        workItem = item
        setResolutionAndFpsQueue.async(execute: item)
    }

    /// Returns ISO/shutter to automatic control, restoring the exposure
    /// duration recorded when the device was initialized.
    func setAutoMode() {
        guard let currentDevice = currentDevice,
              currentDevice.isExposureModeSupported(.autoExpose)
        else { return }
        try? currentDevice.lockForConfiguration()
        currentDevice.setExposureModeCustom(duration: autoExposureDuration, iso: currentDevice.iso) { [weak currentDevice] _ in
            currentDevice?.exposureMode = .autoExpose
            currentDevice?.unlockForConfiguration()
        }
    }

    /// Sets a custom sensor ISO, keeping the current exposure duration.
    func setISO(value: Float) {
        guard let currentDevice = currentDevice else {
            return
        }
        try? currentDevice.lockForConfiguration()
        // Out-of-range ISO values throw; clamp to the active format's limits.
        let clampedISO = min(max(value, minISO), maxISO)
        currentDevice.setExposureModeCustom(duration: AVCaptureDevice.currentExposureDuration, iso: clampedISO, completionHandler: nil)
        currentDevice.unlockForConfiguration()
    }

    /// Sets a custom shutter speed (exposure duration) in seconds,
    /// clamped to the active format's supported range.
    func setShutterSpeed(value: Float) {
        guard let currentDevice = currentDevice else {
            return
        }
        let maxValue = maxExposureDuration.seconds
        let minValue = minExposureDuration.seconds
        let value = Double(value)
        var duration: CMTime
        if value < minValue {
            duration = minExposureDuration
        } else if value > maxValue {
            duration = maxExposureDuration
        } else {
            // Quantize to a 1/10000 s timescale; keep at least one tick.
            let scale: Int32 = 10000
            let v = max(Int64(value * Double(scale)), 1)
            duration = CMTimeMake(value: v, timescale: scale)
        }
        try? currentDevice.lockForConfiguration()
        currentDevice.setExposureModeCustom(duration: duration, iso: currentDevice.iso) { [weak currentDevice] _ in
            currentDevice?.exposureMode = .custom
            currentDevice?.unlockForConfiguration()
        }
    }

    /// Sets the focus mode, optionally focusing at a point given in the
    /// preview layer's coordinate space.
    func setFocus(mode: AVCaptureDevice.FocusMode, point: CGPoint? = nil) {
        guard let currentDevice = currentDevice,
              currentDevice.isFocusModeSupported(mode)
        else { return }
        try? currentDevice.lockForConfiguration()
        if currentDevice.isFocusPointOfInterestSupported,
           let point = point,
           let castPoint = previewLayer?.captureDevicePointConverted(fromLayerPoint: point) {
            currentDevice.focusPointOfInterest = castPoint
        }
        currentDevice.focusMode = mode
        currentDevice.unlockForConfiguration()
    }

    /// Locks focus at a manual lens position (0 = nearest, 1 = farthest).
    func setFocus(lensPosition: Float) {
        guard let currentDevice = currentDevice,
              currentDevice.isLockingFocusWithCustomLensPositionSupported
        else { return }
        // FIX: values outside 0...1 raise NSInvalidArgumentException — clamp first.
        let clampedPosition = min(max(lensPosition, 0), 1)
        try? currentDevice.lockForConfiguration()
        currentDevice.setFocusModeLocked(lensPosition: clampedPosition)
        currentDevice.unlockForConfiguration()
    }

    /// Sets the exposure compensation (EV bias), clamped to the device's range.
    func setExposure(value: Float) {
        guard let currentDevice = currentDevice,
              currentDevice.isExposureModeSupported(.locked)
        else { return }
        // FIX: setExposureTargetBias throws for values outside
        // [minExposureTargetBias, maxExposureTargetBias] — clamp first.
        let clampedBias = min(max(value, currentDevice.minExposureTargetBias), currentDevice.maxExposureTargetBias)
        try? currentDevice.lockForConfiguration()
        currentDevice.setExposureTargetBias(clampedBias)
        currentDevice.unlockForConfiguration()
    }

    /// Sets the white balance mode, if supported.
    func setWhiteBalance(mode: AVCaptureDevice.WhiteBalanceMode) {
        guard let currentDevice = currentDevice,
              currentDevice.isWhiteBalanceModeSupported(mode)
        else { return }
        try? currentDevice.lockForConfiguration()
        currentDevice.whiteBalanceMode = mode
        currentDevice.unlockForConfiguration()
    }

    /// Locks white balance at a color temperature (Kelvin, tint 0),
    /// clamping the derived gains into the device's valid range.
    func setWhiteBalance(temperature: Float) {
        guard let currentDevice = currentDevice,
              currentDevice.isWhiteBalanceModeSupported(.locked)
        else { return }
        let temperatureAndTintValues = AVCaptureDevice.WhiteBalanceTemperatureAndTintValues(temperature: temperature, tint: 0)
        let whiteBalanceGains = currentDevice.deviceWhiteBalanceGains(for: temperatureAndTintValues)
        let maxWhiteBalanceGain = currentDevice.maxWhiteBalanceGain
        // Gains must lie in [1.0, maxWhiteBalanceGain] or the setter throws.
        var fixWhiteBalanceGains = whiteBalanceGains
        fixWhiteBalanceGains.redGain = max(1.0, min(maxWhiteBalanceGain, whiteBalanceGains.redGain))
        fixWhiteBalanceGains.greenGain = max(1.0, min(maxWhiteBalanceGain, whiteBalanceGains.greenGain))
        fixWhiteBalanceGains.blueGain = max(1.0, min(maxWhiteBalanceGain, whiteBalanceGains.blueGain))
        try? currentDevice.lockForConfiguration()
        currentDevice.setWhiteBalanceModeLocked(with: fixWhiteBalanceGains)
        currentDevice.unlockForConfiguration()
    }

    /// Smoothly ramps the zoom toward `factor`, clamped to the available range.
    func setZoom(factor: CGFloat) {
        guard let currentDevice = currentDevice else { return }
        let minZoom = currentDevice.minAvailableVideoZoomFactor
        let maxZoom = currentDevice.maxAvailableVideoZoomFactor
        let zoom = min(maxZoom, max(factor, minZoom))
        try? currentDevice.lockForConfiguration()
        currentDevice.ramp(toVideoZoomFactor: zoom, withRate: 4.0)
        currentDevice.unlockForConfiguration()
    }

    /// Sets the torch (flashlight) mode.
    func setTorchMode(mode: AVCaptureDevice.TorchMode) {
        // FIX: setting torchMode on a device without a torch (e.g. the front
        // camera) or to an unsupported mode raises NSInvalidArgumentException.
        guard let currentDevice = currentDevice,
              currentDevice.hasTorch,
              currentDevice.isTorchModeSupported(mode)
        else { return }
        try? currentDevice.lockForConfiguration()
        currentDevice.torchMode = mode
        currentDevice.unlockForConfiguration()
    }
}
// MARK: - Sample buffer delivery
extension ZGCapture: AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {
    /// Forwards captured video frames to the delegate; audio buffers are only logged.
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard connection == captureConnection else {
            print("已經(jīng)采集音頻--audio")
            return
        }
        // sampleBuffer is the raw frame; per-frame processing (filters,
        // beautification, encoding) operates on this buffer downstream.
        delegate?.processSampleBuffer(sampleBuffer: sampleBuffer, type: .video)
    }
}
Swift-AVCapture視頻采集
最后編輯于 :
?著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請(qǐng)聯(lián)系作者
【社區(qū)內(nèi)容提示】社區(qū)部分內(nèi)容疑似由AI輔助生成,瀏覽時(shí)請(qǐng)結(jié)合常識(shí)與多方信息審慎甄別。
平臺(tái)聲明:文章內(nèi)容(如有圖片或視頻亦包括在內(nèi))由作者上傳并發(fā)布,文章內(nèi)容僅代表作者本人觀點(diǎn),簡(jiǎn)書(shū)系信息發(fā)布平臺(tái),僅提供信息存儲(chǔ)服務(wù)。
【社區(qū)內(nèi)容提示】社區(qū)部分內(nèi)容疑似由AI輔助生成,瀏覽時(shí)請(qǐng)結(jié)合常識(shí)與多方信息審慎甄別。
平臺(tái)聲明:文章內(nèi)容(如有圖片或視頻亦包括在內(nèi))由作者上傳并發(fā)布,文章內(nèi)容僅代表作者本人觀點(diǎn),簡(jiǎn)書(shū)系信息發(fā)布平臺(tái),僅提供信息存儲(chǔ)服務(wù)。
相關(guān)閱讀更多精彩內(nèi)容
- 概述 音視頻采集是直播架構(gòu)的第一步 音視頻采集包括兩部分視頻采集音頻采集 iOS 開(kāi)發(fā)中,同音視頻采集相關(guān) API...
- 本章介紹一下視頻采集的實(shí)現(xiàn),主要有功能有1.音、視頻文件錄制播放2.焦距設(shè)置3.防抖功能4.攝像頭切換5.手電筒功...
- 您有好的視頻,我有好的平臺(tái)——優(yōu)質(zhì)教學(xué)視頻采集中... 1、平臺(tái)簡(jiǎn)介:“菠蘿微課”是一個(gè)全科性的優(yōu)質(zhì)教學(xué)視頻與圖文...
- 音視頻流媒體開(kāi)發(fā)-目錄[http://www.itdecent.cn/p/5a868a667838]iOS知識(shí)點(diǎn)...
- 整體架構(gòu) 以LFLiveSession為中心切分成3部分: 前面是音視頻的數(shù)據(jù)采集 后面是音視頻數(shù)據(jù)推送到服務(wù)器 ...