// NOTE: removed a web-page artifact line ("code pull finished, page will refresh") that was not part of the source.
/*
* Copyright (c) 2012 Stefano Sabatini
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file libavformat and libavcodec demuxing and decoding API usage example
* @example demux_decode.cpp
*
* Show how to use the libavformat and libavcodec API to demux and decode audio
* and video data. Write the output as raw audio and video files to be played by
* ffplay.
*/
extern "C" {
#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libavutil/timestamp.h>
#include <libavutil/time.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
#include <libavutil/opt.h>
}
#include <cstdio>
#include <cstdlib>
#include <deque> // 新增:引入 deque 头文件
#include <pthread.h> // 引入 pthread 头文件
#include <string> // 新增:引入 string 头文件
// 新增:引入SDL2头文件
extern "C" {
#include <SDL2/SDL.h>
#include <SDL2/SDL_log.h>
#include <SDL2/SDL_audio.h>
}
class PacketQueue {
public:
std::string name; // 修复:确保 name 成员变量正确定义
static const int END_OF_STREAM = -1;
PacketQueue(const std::string& queue_name) : name(queue_name), abort_request(false) {
pthread_mutex_init(&mutex, nullptr);
pthread_cond_init(&cond, nullptr);
}
void push(AVPacket *pkt) {
pthread_mutex_lock(&mutex);
// printf("[%s] push PacketQueue : stream_index=%d, pts=%lld , enqueue_count=%lld\n", name.c_str(), pkt->stream_index, pkt->pts, ++enqueue_count);
queue.push_back(pkt);
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
}
AVPacket *pop() {
pthread_mutex_lock(&mutex);
while (queue.empty() && !abort_request) {
pthread_cond_wait(&cond, &mutex);
}
if (abort_request) {
pthread_mutex_unlock(&mutex);
return nullptr;
}
AVPacket *pkt = queue.front();
// printf("[%s] pop PacketQueue : stream_index=%d, pts=%lld, dequeue_count=%lld\n", name.c_str(), pkt->stream_index, pkt->pts, ++dequeue_count);
queue.pop_front();
pthread_mutex_unlock(&mutex);
return pkt;
}
// 新增:发送结束标志
void send_end_of_stream() {
pthread_mutex_lock(&mutex);
AVPacket eof_pkt = {0};
eof_pkt.stream_index = END_OF_STREAM; // 使用特殊标志表示结束
queue.push_back(av_packet_clone(&eof_pkt));
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
}
void abort() {
pthread_mutex_lock(&mutex);
abort_request = true;
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
}
private:
std::deque<AVPacket *> queue; // 修复:deque 已正确引入
pthread_mutex_t mutex;
pthread_cond_t cond;
bool abort_request;
long enqueue_count = 0;
long dequeue_count = 0;
};
class FrameQueue {
public:
std::string name; // 修复:确保 name 成员变量正确定义
static const int END_OF_STREAM = -1;
FrameQueue(const std::string& queue_name) : name(queue_name), abort_request(false) {
pthread_mutex_init(&mutex, nullptr);
pthread_cond_init(&cond, nullptr);
}
void push(AVFrame *frame) {
pthread_mutex_lock(&mutex);
// printf("[%s] push FrameQueue : enqueue_count=%lld\n", name.c_str(), ++enqueue_count);
queue.push_back(frame);
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
}
AVFrame *pop() {
pthread_mutex_lock(&mutex);
while (queue.empty() && !abort_request) {
pthread_cond_wait(&cond, &mutex);
}
if (abort_request) {
pthread_mutex_unlock(&mutex);
return nullptr;
}
AVFrame *frame = queue.front();
// printf("[%s] pop FrameQueue : dequeue_count=%lld\n", name.c_str(), ++dequeue_count);
queue.pop_front();
pthread_mutex_unlock(&mutex);
return frame;
}
// 新增:发送结束标志
void send_end_of_stream() {
pthread_mutex_lock(&mutex);
AVFrame eof_frame = {0};
eof_frame.format = END_OF_STREAM; // 使用特殊标志表示结束
queue.push_back(av_frame_clone(&eof_frame));
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
}
void abort() {
pthread_mutex_lock(&mutex);
abort_request = true;
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
}
private:
std::deque<AVFrame *> queue; // 修复:deque 已正确引入
pthread_mutex_t mutex;
pthread_cond_t cond;
long enqueue_count = 0;
long dequeue_count = 0;
bool abort_request;
};
class Demuxer {
public:
Demuxer() : fmt_ctx(nullptr), video_dec_ctx(nullptr), audio_dec_ctx(nullptr),
width(0), height(0), pix_fmt(AV_PIX_FMT_NONE),
video_stream(nullptr), audio_stream(nullptr),
src_filename(nullptr), video_dst_filename(nullptr), audio_dst_filename(nullptr),
video_dst_file(nullptr), audio_dst_file(nullptr),
video_frame_count(0), audio_frame_count(0),
video_pkt_queue("VideoPacketQueue"), audio_pkt_queue("AudioPacketQueue"),
video_frame_queue("VideoFrameQueue"), audio_frame_queue("AudioFrameQueue") {}
~Demuxer() {
avcodec_free_context(&video_dec_ctx);
avcodec_free_context(&audio_dec_ctx);
avformat_close_input(&fmt_ctx);
if (video_dst_file)
fclose(video_dst_file);
if (audio_dst_file)
fclose(audio_dst_file);
av_packet_free(&pkt);
av_frame_free(&frame);
av_free(video_dst_data[0]);
}
// 新增子线程函数:读取帧并分发到队列
static void *read_frame_thread(void *arg) {
try {
Demuxer *demuxer = static_cast<Demuxer *>(arg);
AVPacket *pkt = av_packet_alloc();
if (!pkt) {
fprintf(stderr, "Could not allocate packet in read_frame_thread\n");
return nullptr;
}
printf(" =>> Starting read_frame_thread\n");
while (av_read_frame(demuxer->fmt_ctx, pkt) >= 0) {
if (pkt->stream_index == demuxer->video_stream_idx) {
demuxer->video_pkt_queue.push(av_packet_clone(pkt));
} else if (pkt->stream_index == demuxer->audio_stream_idx) {
demuxer->audio_pkt_queue.push(av_packet_clone(pkt));
}
av_packet_unref(pkt);
}
// 发送结束标志
if (demuxer->video_stream) {
demuxer->video_pkt_queue.send_end_of_stream();
}
if (demuxer->audio_stream) {
demuxer->audio_pkt_queue.send_end_of_stream();
}
av_packet_free(&pkt);
printf("Finished read_frame_thread\n");
} catch (const std::exception &e) {
fprintf(stderr, "Exception in read_frame_thread: %s\n", e.what());
} catch (...) {
fprintf(stderr, "Unknown exception in read_frame_thread\n");
}
return nullptr;
}
// 视频解码线程函数
static void *video_decode_thread(void *arg) {
try {
Demuxer *demuxer = static_cast<Demuxer *>(arg);
printf(" =>> Starting video_decode_thread\n");
while (true) {
AVPacket *pkt = demuxer->video_pkt_queue.pop();
if (!pkt) {
printf("Received end of video stream\n");
break; // 结束标志
}
if (pkt->stream_index == PacketQueue::END_OF_STREAM) {
av_packet_free(&pkt);
break; // 结束标志
}
printf("Decoding video packet: pts=%lld\n", pkt->pts);
demuxer->decode_packet(demuxer->video_dec_ctx, pkt);
av_packet_free(&pkt);
}
// 发送结束标志
if (demuxer->video_stream) {
demuxer->video_frame_queue.send_end_of_stream();
}
printf("Finished video_decode_thread\n");
} catch (const std::exception &e) {
fprintf(stderr, "Exception in video_decode_thread: %s\n", e.what());
} catch (...) {
fprintf(stderr, "Unknown exception in video_decode_thread\n");
}
return nullptr;
}
// 音频解码线程函数
static void *audio_decode_thread(void *arg) {
try {
Demuxer *demuxer = static_cast<Demuxer *>(arg);
printf(" =>> Starting audio_decode_thread\n");
while (true) {
AVPacket *pkt = demuxer->audio_pkt_queue.pop();
if (!pkt) {
printf("Received end of audio stream\n");
break; // 结束标志
}
if (pkt->stream_index == PacketQueue::END_OF_STREAM) {
av_packet_free(&pkt);
break; // 结束标志
}
printf("Decoding audio packet: pts=%lld\n", pkt->pts);
demuxer->decode_packet(demuxer->audio_dec_ctx, pkt);
av_packet_free(&pkt);
}
// 发送结束标志
if (demuxer->audio_stream) {
demuxer->audio_frame_queue.send_end_of_stream();
}
printf("Finished audio_decode_thread\n");
} catch (const std::exception &e) {
fprintf(stderr, "Exception in audio_decode_thread: %s\n", e.what());
} catch (...) {
fprintf(stderr, "Unknown exception in audio_decode_thread\n");
}
return nullptr;
}
// 新增视频写入线程函数
static void *video_write_thread(void *arg) {
try {
Demuxer *demuxer = static_cast<Demuxer *>(arg);
printf(" =>> Starting video_write_thread\n");
while (true) {
AVFrame *frame = demuxer->video_frame_queue.pop();
if (!frame) {
printf("Received end of video frame queue\n");
break; // 结束标志
}
if (frame->format == FrameQueue::END_OF_STREAM) {
av_frame_free(&frame);
break; // 结束标志
}
// demuxer->display_video_frame(frame);
av_frame_free(&frame);
}
printf("Finished video_write_thread\n");
} catch (const std::exception &e) {
fprintf(stderr, "Exception in video_write_thread: %s\n", e.what());
} catch (...) {
fprintf(stderr, "Unknown exception in video_write_thread\n");
}
return nullptr;
}
// 新增音频写入线程函数
static void *audio_write_thread(void *arg) {
try {
Demuxer *demuxer = static_cast<Demuxer *>(arg);
printf(" =>> Starting audio_write_thread\n");
while (true) {
AVFrame *frame = demuxer->audio_frame_queue.pop();
if (!frame) {
printf("Received end of audio frame queue\n");
break; // 结束标志
}
if (frame->format == FrameQueue::END_OF_STREAM) {
av_frame_free(&frame);
break; // 结束标志
}
demuxer->output_audio_frame(frame);
av_frame_free(&frame);
}
printf("Finished audio_write_thread\n");
} catch (const std::exception &e) {
fprintf(stderr, "Exception in audio_write_thread: %s\n", e.what());
} catch (...) {
fprintf(stderr, "Unknown exception in audio_write_thread\n");
}
return nullptr;
}
// 新增:SDL2初始化方法
bool init_sdl() {
if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO) < 0) {
fprintf(stderr, "SDL initialization failed: %s\n", SDL_GetError());
return false;
}
printf("SDL initialized successfully.\n");
// 检查宽度和高度是否有效
if (width <= 0 || height <= 0) {
fprintf(stderr, "Invalid video dimensions: width=%d, height=%d\n", width, height);
return false;
}
// 初始化SDL2窗口和纹理
window = SDL_CreateWindow("Video Player", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, width, height, SDL_WINDOW_SHOWN);
if (!window) {
fprintf(stderr, "Failed to create SDL window: %s\n", SDL_GetError());
return false;
}
renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED);
if (!renderer) {
fprintf(stderr, "Failed to create SDL renderer: %s\n", SDL_GetError());
return false;
}
texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, width, height);
if (!texture) {
fprintf(stderr, "Failed to create SDL texture: %s\n", SDL_GetError());
return false;
}
return true;
}
// 新增:SDL2清理方法
void cleanup_sdl() {
if (texture) SDL_DestroyTexture(texture);
if (renderer) SDL_DestroyRenderer(renderer);
if (window) SDL_DestroyWindow(window);
SDL_Quit();
printf("SDL cleaned up.\n");
}
// 新增:音频设备初始化方法
bool init_audio_device() {
if (SDL_Init(SDL_INIT_AUDIO) < 0) {
fprintf(stderr, "SDL audio initialization failed: %s\n", SDL_GetError());
return false;
}
SDL_AudioSpec desired_spec, obtained_spec;
desired_spec.freq = audio_dec_ctx->sample_rate;
desired_spec.format = AUDIO_S16SYS;
desired_spec.channels = audio_dec_ctx->ch_layout.nb_channels;
desired_spec.samples = 4096;
desired_spec.callback = audio_callback;
desired_spec.userdata = this;
if (SDL_OpenAudio(&desired_spec, &obtained_spec) < 0) {
fprintf(stderr, "Failed to open audio device: %s\n", SDL_GetError());
return false;
}
audio_buffer = nullptr;
audio_buffer_size = 0;
audio_buffer_offset = 0;
SDL_PauseAudio(0);
return true;
}
// 新增:音频设备清理方法
void cleanup_audio_device() {
SDL_CloseAudio();
SDL_QuitSubSystem(SDL_INIT_AUDIO);
printf("SDL audio cleaned up.\n");
}
// 新增:音频回调函数
static void audio_callback(void *userdata, Uint8 *stream, int len) {
Demuxer *demuxer = static_cast<Demuxer *>(userdata);
demuxer->audio_callback_impl(stream, len);
}
// 新增:音频回调实现
void audio_callback_impl(Uint8 *stream, int len) {
if (audio_buffer == nullptr || audio_buffer_offset >= audio_buffer_size) {
// 需要填充新的音频数据
AVFrame *frame = audio_frame_queue.pop();
if (frame) {
if (frame->format == FrameQueue::END_OF_STREAM) {
av_frame_free(&frame);
return;
}
// 将音频帧转换为 PCM 数据
int ret = swr_convert(swr_ctx, &audio_buffer, audio_buffer_size, (const uint8_t **)frame->extended_data, frame->nb_samples);
if (ret < 0) {
fprintf(stderr, "Error converting audio frame: %s\n", av_err2str(ret));
av_frame_free(&frame);
return;
}
audio_buffer_offset = 0;
audio_buffer_size = ret * audio_dec_ctx->ch_layout.nb_channels * av_get_bytes_per_sample((AVSampleFormat)frame->format);
av_frame_free(&frame);
} else {
// 如果队列为空,填充静音数据
SDL_memset(stream, 0, len);
return;
}
}
// 填充音频流
int copy_size = SDL_min(len, audio_buffer_size - audio_buffer_offset);
SDL_memcpy(stream, audio_buffer + audio_buffer_offset, copy_size);
audio_buffer_offset += copy_size;
}
// 新增:视频帧显示方法
void display_video_frame(AVFrame *frame) {
if (!frame || !texture) return;
double current_time = av_gettime_relative() / 1000000.0;
double frame_duration = 1.0 / frame_rate;
// 等待直到可以显示下一帧
while (current_time - last_frame_time < frame_duration) {
SDL_Delay(1);
current_time = av_gettime_relative() / 1000000.0;
}
// 更新纹理数据
SDL_UpdateYUVTexture(texture, nullptr,
frame->data[0], frame->linesize[0],
frame->data[1], frame->linesize[1],
frame->data[2], frame->linesize[2]);
// 清空渲染器并绘制纹理
SDL_RenderClear(renderer);
SDL_RenderCopy(renderer, texture, nullptr, nullptr);
SDL_RenderPresent(renderer);
// 更新最后帧的时间戳
last_frame_time = current_time;
}
int run(int argc, char **argv) {
int ret = 0;
const char *src_filename = "D:\\videoa.mp4";
const char *video_dst_filename = "lost_video.yuv";
const char *audio_dst_filename = "lost_audio.pcm";
/* open input file, and allocate format context */
if (avformat_open_input(&fmt_ctx, src_filename, nullptr, nullptr) < 0) {
fprintf(stderr, "Could not open source file %s\n", src_filename);
return 1; // 直接返回错误码,避免 goto 跳转
}
/* retrieve stream information */
if (avformat_find_stream_info(fmt_ctx, nullptr) < 0) {
fprintf(stderr, "Could not find stream information\n");
return 1; // 直接返回错误码,避免 goto 跳转
}
if (open_codec_context(&video_stream_idx, &video_dec_ctx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
video_stream = fmt_ctx->streams[video_stream_idx];
video_dst_file = fopen(video_dst_filename, "wb");
if (!video_dst_file) {
fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
return 1; // 直接返回错误码,避免 goto 跳转
}
/* allocate image where the decoded image will be put */
width = video_dec_ctx->width;
height = video_dec_ctx->height;
pix_fmt = video_dec_ctx->pix_fmt;
ret = av_image_alloc(video_dst_data, video_dst_linesize,
width, height, pix_fmt, 1);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw video buffer\n");
return 1; // 直接返回错误码,避免 goto 跳转
}
video_dst_bufsize = ret;
}
// 调用SDL2初始化方法
if (!init_sdl()) {
return 1;
}
if (open_codec_context(&audio_stream_idx, &audio_dec_ctx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
audio_stream = fmt_ctx->streams[audio_stream_idx];
audio_dst_file = fopen(audio_dst_filename, "wb");
if (!audio_dst_file) {
fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
return 1; // 直接返回错误码,避免 goto 跳转
}
}
/* dump input information to stderr */
av_dump_format(fmt_ctx, 0, src_filename, 0);
if (!audio_stream && !video_stream) {
fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
ret = 1;
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate frame\n");
ret = AVERROR(ENOMEM);
}
pkt = av_packet_alloc();
if (!pkt) {
fprintf(stderr, "Could not allocate packet\n");
ret = AVERROR(ENOMEM);
}
if (video_stream)
printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
if (audio_stream)
printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
// 初始化帧率
calculate_frame_rate();
// 创建读取帧的子线程
pthread_t read_thread;
printf("MAIN-THREAD: Creating read frame thread\n");
if (pthread_create(&read_thread, nullptr, read_frame_thread, this) != 0) {
fprintf(stderr, "Failed to create read frame thread\n");
return 1;
}
// 创建线程处理视频和音频解码
pthread_t video_thread, audio_thread;
if (video_stream) {
printf("MAIN-THREAD: Creating video decode thread\n");
if (pthread_create(&video_thread, nullptr, video_decode_thread, this) != 0) {
fprintf(stderr, "Failed to create video decode thread\n");
return 1;
}
}
if (audio_stream) {
printf("MAIN-THREAD: Creating audio decode thread\n");
if (pthread_create(&audio_thread, nullptr, audio_decode_thread, this) != 0) {
fprintf(stderr, "Failed to create audio decode thread\n");
return 1;
}
}
// 创建写入线程
pthread_t video_write_thread_id, audio_write_thread_id;
if (video_stream) {
printf("MAIN-THREAD: Creating video write thread\n");
if (pthread_create(&video_write_thread_id, nullptr, video_write_thread, this) != 0) {
fprintf(stderr, "Failed to create video write thread\n");
return 1;
}
}
if (audio_stream) {
printf("MAIN-THREAD: Creating audio write thread\n");
init_audio_device();
// if (pthread_create(&audio_write_thread_id, nullptr, audio_write_thread, this) != 0) {
// fprintf(stderr, "Failed to create audio write thread\n");
// return 1;
// }
}
// 主线程等待子线程完成读取任务
printf("MAIN-THREAD: Waiting for read frame thread to finish\n");
if (pthread_join(read_thread, nullptr) != 0) {
fprintf(stderr, "Failed to join read frame thread\n");
return 1;
}
// 等待解码线程结束
if (video_stream) {
printf("MAIN-THREAD: Waiting for video decode thread to finish\n");
if (pthread_join(video_thread, nullptr) != 0) {
fprintf(stderr, "Failed to join video decode thread\n");
return 1;
}
}
if (audio_stream) {
printf("MAIN-THREAD: Waiting for audio decode thread to finish\n");
if (pthread_join(audio_thread, nullptr) != 0) {
fprintf(stderr, "Failed to join audio decode thread\n");
return 1;
}
}
// 等待写入线程结束
if (video_stream) {
printf("MAIN-THREAD: Waiting for video write thread to finish\n");
if (pthread_join(video_write_thread_id, nullptr) != 0) {
fprintf(stderr, "Failed to join video write thread\n");
return 1;
}
}
if (audio_stream) {
printf("MAIN-THREAD: Waiting for audio write thread to finish\n");
// if (pthread_join(audio_write_thread_id, nullptr) != 0) {
// fprintf(stderr, "Failed to join audio write thread\n");
// return 1;
// }
}
printf("MAIN-THREAD: All threads have finished.\n");
// 清理队列
video_pkt_queue.abort();
audio_pkt_queue.abort();
video_frame_queue.abort();
audio_frame_queue.abort();
// 调用SDL2清理方法
cleanup_sdl();
// 清理音频设备
if (audio_stream) {
cleanup_audio_device();
}
// 确保所有资源都被正确释放
avformat_close_input(&fmt_ctx);
if (video_dst_file)
fclose(video_dst_file);
if (audio_dst_file)
fclose(audio_dst_file);
av_packet_free(&pkt);
av_frame_free(&frame);
av_free(video_dst_data[0]);
return ret < 0;
}
private:
AVFormatContext *fmt_ctx;
AVCodecContext *video_dec_ctx, *audio_dec_ctx;
int width, height;
enum AVPixelFormat pix_fmt;
AVStream *video_stream, *audio_stream;
const char *src_filename;
const char *video_dst_filename;
const char *audio_dst_filename;
FILE *video_dst_file;
FILE *audio_dst_file;
uint8_t *video_dst_data[4] = {nullptr};
int video_dst_linesize[4];
int video_dst_bufsize;
int video_stream_idx = -1, audio_stream_idx = -1;
AVFrame *frame = nullptr;
AVPacket *pkt = nullptr;
int video_frame_count;
int audio_frame_count;
// 新增成员变量用于线程安全队列
PacketQueue video_pkt_queue;
PacketQueue audio_pkt_queue;
FrameQueue video_frame_queue;
FrameQueue audio_frame_queue;
// 新增:SDL2窗口和纹理相关成员变量
SDL_Window *window = nullptr;
SDL_Renderer *renderer = nullptr;
SDL_Texture *texture = nullptr;
// 新增:音频相关成员变量
SwrContext *swr_ctx = nullptr;
uint8_t *audio_buffer = nullptr;
int audio_buffer_size = 0;
int audio_buffer_offset = 0;
// 新增:帧率控制相关变量
double frame_rate = 0.0;
double last_frame_time = 0.0;
int output_video_frame(AVFrame *frame) {
if (frame->width != width || frame->height != height ||
frame->format != pix_fmt) {
/* To handle this change, one could call av_image_alloc again and
* decode the following frames into another rawvideo file. */
fprintf(stderr, "Error: Width, height and pixel format have to be "
"constant in a rawvideo file, but the width, height or "
"pixel format of the input video changed:\n"
"old: width = %d, height = %d, format = %s\n"
"new: width = %d, height = %d, format = %s\n",
width, height, av_get_pix_fmt_name((AVPixelFormat)pix_fmt),
frame->width, frame->height,
av_get_pix_fmt_name((AVPixelFormat)frame->format));
return -1;
}
printf("video_frame n:%d\n", video_frame_count++);
/* copy decoded frame to destination buffer:
* this is required since rawvideo expects non aligned data */
av_image_copy2(video_dst_data, video_dst_linesize,
frame->data, frame->linesize,
pix_fmt, width, height);
/* write to rawvideo file */
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
return 0;
}
int output_audio_frame(AVFrame *frame) {
size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample((AVSampleFormat)frame->format);
printf("audio_frame n:%d nb_samples:%d pts:%s\n",
audio_frame_count++, frame->nb_samples,
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
/* Write the raw audio data samples of the first plane. This works
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
* most audio decoders output planar audio, which uses a separate
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
* In other words, this code will write only the first audio channel
* in these cases.
* You should use libswresample or libavfilter to convert the frame
* to packed data. */
fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
return 0;
}
// 修改 decode_packet 函数,将解码后的帧放入帧队列
int decode_packet(AVCodecContext *dec, const AVPacket *pkt) {
int ret = 0;
// submit the packet to the decoder
ret = avcodec_send_packet(dec, pkt);
if (ret < 0) {
fprintf(stderr, "Error submitting a packet for decoding (%s)\n", av_err2str(ret));
return ret;
}
// get all the available frames from the decoder
while (ret >= 0) {
ret = avcodec_receive_frame(dec, frame);
if (ret < 0) {
// those two return values are special and mean there is no output
// frame available, but there were no errors during decoding
if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
return 0;
fprintf(stderr, "Error during decoding (%s)\n", av_err2str(ret));
return ret;
}
// 将解码后的帧放入帧队列
if (dec->codec->type == AVMEDIA_TYPE_VIDEO) {
AVFrame *video_frame = av_frame_clone(frame);
if (video_frame) {
video_frame_queue.push(video_frame);
}
} else {
AVFrame *audio_frame = av_frame_clone(frame);
if (audio_frame) {
audio_frame_queue.push(audio_frame);
}
}
av_frame_unref(frame);
}
return ret;
}
int open_codec_context(int *stream_idx,
AVCodecContext **dec_ctx, AVFormatContext *fmt_ctx, enum AVMediaType type) {
int ret, stream_index;
AVStream *st;
const AVCodec *dec = nullptr;
ret = av_find_best_stream(fmt_ctx, type, -1, -1, nullptr, 0);
if (ret < 0) {
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
av_get_media_type_string(type), src_filename);
return ret;
} else {
stream_index = ret;
st = fmt_ctx->streams[stream_index];
/* find decoder for the stream */
dec = avcodec_find_decoder(st->codecpar->codec_id);
if (!dec) {
fprintf(stderr, "Failed to find %s codec\n",
av_get_media_type_string(type));
return AVERROR(EINVAL);
}
/* Allocate a codec context for the decoder */
*dec_ctx = avcodec_alloc_context3(dec);
if (!*dec_ctx) {
fprintf(stderr, "Failed to allocate the %s codec context\n",
av_get_media_type_string(type));
return AVERROR(ENOMEM);
}
/* Copy codec parameters from input stream to output codec context */
if ((ret = avcodec_parameters_to_context(*dec_ctx, st->codecpar)) < 0) {
fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n",
av_get_media_type_string(type));
return ret;
}
/* Init the decoders */
if ((ret = avcodec_open2(*dec_ctx, dec, nullptr)) < 0) {
fprintf(stderr, "Failed to open %s codec\n",
av_get_media_type_string(type));
return ret;
}
*stream_idx = stream_index;
}
return 0;
}
int get_format_from_sample_fmt(const char **fmt,
enum AVSampleFormat sample_fmt) {
int i;
struct sample_fmt_entry {
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
} sample_fmt_entries[] = {
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
};
*fmt = nullptr;
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
if (sample_fmt == entry->sample_fmt) {
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
return 0;
}
}
fprintf(stderr,
"sample format %s is not supported as output format\n",
av_get_sample_fmt_name(sample_fmt));
return -1;
}
// 新增:计算帧率的方法
void calculate_frame_rate() {
if (video_stream && video_stream->avg_frame_rate.num > 0 && video_stream->avg_frame_rate.den > 0) {
frame_rate = av_q2d(video_stream->avg_frame_rate);
} else {
frame_rate = 25.0; // 默认帧率为 25 FPS
}
}
};
int main(int argc, char **argv) {
Demuxer demuxer;
return demuxer.run(argc, argv);
}
// NOTE: removed two trailing web-page artifact lines (a code-hosting site's
// content-review/appeal notice) that were not part of the source file.