JackLee_CN
1 year ago
24 changed files with 1096 additions and 831 deletions
@ -0,0 +1,51 @@
|
||||
#include "MediaAVDecoder.h" |
||||
|
||||
// Pre-allocate a codec context; the concrete decoder is bound later in init().
MediaAVDecoder::MediaAVDecoder()
{
    m_AVCodec_Context = avcodec_alloc_context3(nullptr);
    if (m_AVCodec_Context == nullptr)
    {
        // Allocation can fail under memory pressure; init() would then hand a
        // null context to avcodec_parameters_to_context(), so report it early.
        printf("avcodec_alloc_context3 error\n");
    }
}
||||
// Release the codec context (closes the codec if it was opened).
MediaAVDecoder::~MediaAVDecoder()
{
    if (!m_AVCodec_Context)
        return;
    avcodec_free_context(&m_AVCodec_Context); // also resets the pointer to NULL
    m_AVCodec_Context = nullptr;
}
||||
// Bind this decoder to the given stream: copy the stream's codec parameters
// into the pre-allocated context, look up a matching decoder and open it.
// Returns 0 on success, a negative AVERROR code on failure (the original
// always returned 0, so callers could not detect a failed open).
int MediaAVDecoder::init(MeidaAVStream *stream)
{
    int ret = avcodec_parameters_to_context(m_AVCodec_Context, stream->m_codecPar);
    if (ret < 0)
    {
        printf("copy codec parameters error\n");
        return ret;
    }

    AVCodec *codec = (AVCodec *)avcodec_find_decoder(m_AVCodec_Context->codec_id);
    if (codec == nullptr)
    {
        printf("decoder not found\n");
        return -1;
    }

    // Frames must be reference-counted so the caller owns their lifetime.
    // BUGFIX: the option value is a C string; the old code passed the address
    // of a `char refcount = 1`, which is not NUL-terminated (undefined
    // behaviour in av_dict_set).
    AVDictionary *opts = NULL;
    av_dict_set(&opts, "refcounted_frames", "1", 0);

    ret = avcodec_open2(m_AVCodec_Context, codec, &opts);
    av_dict_free(&opts); // unconsumed entries would otherwise leak
    if (ret)
    {
        printf("open codec error");
        return ret;
    }
    return 0;
}
||||
|
||||
// Feed one packet to the decoder; a null wrapper flushes the decoder
// (equivalent to sending a NULL AVPacket). Returns the avcodec error code.
int MediaAVDecoder::SendAVPackt(MediaAVPacket *pkt)
{
    AVPacket *raw = (pkt != nullptr) ? pkt->m_pakcet : nullptr;
    return avcodec_send_packet(m_AVCodec_Context, raw);
}
||||
// Pull the next decoded frame into the caller-owned wrapper.
// Returns 0 on success or an avcodec error code (e.g. AVERROR(EAGAIN)/EOF).
int MediaAVDecoder::ReceiveAVFrame(MediaAVFrame *frame)
{
    return avcodec_receive_frame(m_AVCodec_Context, frame->m_frame);
}
||||
// Expose the raw codec context (non-owning; the decoder keeps ownership).
AVCodecContext* MediaAVDecoder::GetAVCodecContext(){
    return m_AVCodec_Context;
}
@ -0,0 +1,21 @@
|
||||
#ifndef MEDIAAVDECODER_H
#define MEDIAAVDECODER_H

#include "MediaAVStream.h"
#include "MediaAVPacket.h"
#include "MediaAVFrame.h"

// FFmpeg is a C library: its headers must be wrapped in extern "C" when
// included from C++, otherwise the declarations get C++ name mangling and
// fail to link. (The wrappers above happen to include these headers inside
// extern "C" already, but relying on include order is fragile.)
extern "C"
{
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
}

#include <QDebug>

// Thin RAII wrapper around an AVCodecContext: allocates the context on
// construction, frees it on destruction, and forwards the send/receive
// decode API.
class MediaAVDecoder{
public:
    MediaAVDecoder();
    ~MediaAVDecoder();
    // Copy the stream's codec parameters and open the matching decoder.
    int init(MeidaAVStream *stream);
    // Send one packet to the decoder (null wrapper == flush).
    int SendAVPackt(MediaAVPacket *pkt);
    // Receive the next decoded frame into the caller's wrapper.
    int ReceiveAVFrame(MediaAVFrame *frame);
    // Non-owning access to the underlying context.
    AVCodecContext* GetAVCodecContext();
private:
    AVCodecContext* m_AVCodec_Context; // owned; allocated in ctor, freed in dtor
};
#endif
@ -0,0 +1,9 @@
|
||||
#include "MediaAVFrame.h" |
||||
|
||||
// Allocate an empty AVFrame; data buffers are attached later by the decoder.
// NOTE(review): av_frame_alloc() can return NULL; unlike MediaAVPacket the
// failure is not reported here.
MediaAVFrame::MediaAVFrame(){
    m_frame = av_frame_alloc();
}
||||
// Release the frame; av_frame_free() unrefs any attached buffers and also
// nulls the pointer it is handed (the explicit reset below is redundant
// but harmless).
MediaAVFrame::~MediaAVFrame(){
    av_frame_free(&m_frame);
    m_frame = nullptr;
}
@ -0,0 +1,19 @@
|
||||
#ifndef MEDIAAVFRAME_H
#define MEDIAAVFRAME_H

// FFmpeg is a C library; wrap its headers so symbols are not name-mangled.
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
};

// RAII wrapper that owns a single AVFrame for its whole lifetime.
class MediaAVFrame {
public:
    AVFrame *m_frame; // owned: allocated in the ctor, freed in the dtor
    MediaAVFrame();
    ~MediaAVFrame();
};


#endif //MEDIAAVFRAME_H
|
@ -0,0 +1,15 @@
|
||||
#include <stdio.h> |
||||
#include "MediaAVPacket.h" |
||||
|
||||
// Allocate an empty AVPacket; failure is reported but not fatal (the member
// stays NULL and later use would crash — callers rely on allocation succeeding).
MediaAVPacket::MediaAVPacket() {
    m_pakcet = av_packet_alloc();
    if (m_pakcet==nullptr)
    {
        printf("av_packet_alloc error\n");
    }
}
||||
|
||||
// Free the packet; av_packet_free() also unrefs any buffered data and nulls
// the pointer.
MediaAVPacket::~MediaAVPacket() {
    av_packet_free(&m_pakcet);
    //printf("av_packet_free\n");
}
@ -0,0 +1,18 @@
|
||||
#ifndef MEDIAAVPACKET_H
#define MEDIAAVPACKET_H

// FFmpeg is a C library; wrap its headers so symbols are not name-mangled.
extern "C"
{
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
};

// RAII wrapper that owns one AVPacket for its whole lifetime.
// NOTE(review): "m_pakcet" is a typo for "m_packet" but is part of the public
// interface used by other translation units, so it is left unchanged.
class MediaAVPacket {
public:
    MediaAVPacket();
    ~MediaAVPacket();
    AVPacket *m_pakcet; // owned: allocated in the ctor, freed in the dtor
};


#endif //MEDIAAVPACKET_H
|
@ -0,0 +1,17 @@
|
||||
#ifndef MEDIASTREAM_H
#define MEDIASTREAM_H

// FFmpeg is a C library; wrap its headers so symbols are not name-mangled.
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
};
// Lightweight view of one demuxed stream.
// NOTE(review): the class name is misspelled ("Meida" instead of "Media")
// but it is part of the public interface used elsewhere, so it is kept.
class MeidaAVStream {
public:

    AVCodecParameters *m_codecPar = nullptr; // non-owning; points into the demuxer's AVStream
    int m_stream_index=-1;                   // stream index inside the container; -1 == unset
};


#endif //MEDIASTREAM_H
|
@ -1,197 +0,0 @@
|
||||
#include "SDL2Player.h" |
||||
|
||||
// Construct with display defaults; the SDL objects themselves are created
// later in initPlayer()/switch_screen_texture(). BUGFIX: the SDL pointer
// members and threadExit were previously left uninitialised, so destroying
// a player that was never initialised passed garbage pointers to
// SDL_Destroy* (undefined behaviour). Members are listed in declaration
// order to avoid -Wreorder surprises.
SDL2Player::SDL2Player() : d_width(640),
                           d_height(480),
                           d_Title("SDL player"),
                           stbffmpeg(new STB_FFmpeg()),
                           sdl_window(nullptr),
                           sdl_texture(nullptr),
                           sdl_rander(nullptr),
                           sdl_surface(nullptr),
                           threadExit(0),
                           st_exit(0),
                           st_pause(0),
                           sd_time(25)
{
}
||||
// Tear down SDL state. Signal the refresh thread to stop first so it stops
// pushing events while SDL shuts down, and guard each destroy call: the
// objects only exist if initPlayer()/switch_screen_texture() ran.
// NOTE(review): the handle returned by SDL_CreateThread is discarded in
// initPlayer(), so the thread cannot be joined here — a short race with the
// refresh thread remains.
SDL2Player::~SDL2Player()
{
    st_exit = 1; // ask refresh_video_thread() to leave its loop

    if (sdl_texture)
        SDL_DestroyTexture(sdl_texture);
    // SDL_FreeSurface(pic);
    if (sdl_rander)
        SDL_DestroyRenderer(sdl_rander);
    if (sdl_window)
        SDL_DestroyWindow(sdl_window);
    SDL_Quit();
    delete stbffmpeg;
    stbffmpeg = nullptr;
}
||||
|
||||
// Record the desired window dimensions (also used as the IYUV texture size
// in switch_screen_texture()).
void SDL2Player::set_window_size(int _width, int _height)
{
    d_width = _width;
    d_height = _height;
}
||||
// Current display width (tracks window resizes seen by PlayYUV()).
int SDL2Player::getWidth()
{
    return d_width;
}
||||
// Current display height (tracks window resizes seen by PlayYUV()).
int SDL2Player::getHeight()
{
    return d_height;
}
||||
// Set the title used when the player creates a stand-alone window.
void SDL2Player::set_player_title(QString t_str)
{
    d_Title = t_str;
}
||||
|
||||
int SDL2Player::initPlayer(void *n_winId) |
||||
{ |
||||
if (!n_winId) |
||||
{ |
||||
return -1; |
||||
} |
||||
if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) |
||||
{ |
||||
printf("Could not initialize SDL - %s\n", SDL_GetError()); |
||||
return -1; |
||||
} |
||||
SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "1"); |
||||
if (n_winId != NULL) |
||||
{ |
||||
sdl_window = SDL_CreateWindowFrom(n_winId); |
||||
} |
||||
else |
||||
{ |
||||
sdl_window = SDL_CreateWindow(d_Title.toStdString().c_str(), SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, |
||||
d_width, d_height, SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE | SDL_WINDOW_ALLOW_HIGHDPI); |
||||
} |
||||
if (!sdl_window) |
||||
{ |
||||
printf("SDL: could not create window - exiting:%s\n", SDL_GetError()); |
||||
return -1; |
||||
}
|
||||
SDL_CreateThread(SDL2Player::refresh_video_thread, NULL, this); |
||||
std::cout<<"CreateThread Done!"<<std::endl; |
||||
return 1; |
||||
} |
||||
|
||||
// Rebuild the renderer and texture for the requested mode:
//  - DEFAULT_MODE: static logo image plus text overlay
//  - otherwise:    a streaming IYUV texture for video frames
void SDL2Player::switch_screen_texture(SDL2ScreenMode sdl2Type)
{
    // Drop the old texture before the renderer: the texture belongs to it.
    if (sdl_texture)
    {
        SDL_DestroyTexture(sdl_texture);
        sdl_texture=nullptr;
    }
    if (sdl_rander)
    {
        SDL_DestroyRenderer(sdl_rander);
        sdl_rander=nullptr;
    }

    sdl_rander = SDL_CreateRenderer(sdl_window, -1, SDL_RENDERER_TARGETTEXTURE);
    SDL_SetRenderDrawBlendMode(sdl_rander, SDL_BLENDMODE_BLEND);
    SDL_ShowWindow(sdl_window);
    if (sdl2Type==DEFAULT_MODE)
    {
        // Logo screen: texture is created from a decoded image surface.
        sdl_surface = stbffmpeg->default_logo_surface(logoImageStr);
        if (sdl_surface)
        {
            sdl_texture = SDL_CreateTextureFromSurface(sdl_rander, sdl_surface);
        }
        stbffmpeg->setFontArgs(sdl_rander);
    }else{
        // Video screen: streaming texture sized to the current window.
        sdl_texture = SDL_CreateTexture(sdl_rander, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, d_width, d_height);
    }
}
||||
// Draw the idle/logo screen: centre the logo texture in the window and render
// the caption text underneath.
void SDL2Player::default_screen_texture()
{
    // Blocks until an SDL event arrives; the refresh thread guarantees a
    // steady stream of REFRESH_EVENTs.
    SDL_WaitEvent(&event);
    int iW, iH, iWidth, iHeight;
    SDL_QueryTexture(sdl_texture, NULL, NULL, &iW, &iH); // logo size
    SDL_GetWindowSize(sdl_window, &iWidth, &iHeight);    // current window size
    sdlRect.x = iWidth / 2 - iW / 2;
    sdlRect.y = iHeight / 2 - iH / 2;
    sdlRect.w = iW;
    sdlRect.h = iH;

    SDL_RenderClear(sdl_rander);
    SDL_RenderCopy(sdl_rander, sdl_texture, NULL, &sdlRect);

    // Caption near the bottom, slightly left of the logo's left edge.
    sdlFontRect.x = (iWidth / 2 - iW / 2) - stbffmpeg->fc.faceSize;
    sdlFontRect.y = iHeight - 50;

    stbffmpeg->fc.drawText(sdlFontRect.x, sdlFontRect.y, logoStr);
    SDL_RenderPresent(sdl_rander);
}
||||
// Render one IYUV frame into the caller-supplied destination rect, paced by
// the refresh thread's events. Returns -1 when BREAK_EVENT was received
// (refresh thread exited), 0 otherwise.
int SDL2Player::PlayYUV(uint8_t *buffer, SDL_Rect sdlRect, int delayTime)
{
    SDL_WaitEvent(&event);
    if (event.type == REFRESH_EVENT)
    {
        // Pitch is d_width: the buffer must be tightly packed IYUV at the
        // current window width.
        SDL_UpdateTexture(sdl_texture, NULL, buffer, d_width);
        SDL_RenderClear(sdl_rander);
        SDL_RenderCopy(sdl_rander, sdl_texture, NULL, &sdlRect);
        SDL_RenderPresent(sdl_rander);
        // Delay 40ms
        sd_time = delayTime; // feeds back into the refresh thread's period
    }
    else if (event.type == SDL_QUIT)
    {
        this->threadExit = 1;
    }
    else if (event.type == BREAK_EVENT)
    {
        return -1;
    }
    else if (event.type == SDL_WINDOWEVENT)
    {
        // If Resize
        SDL_GetWindowSize(sdl_window, &d_width, &d_height);
    }
    return 0;
}
||||
|
||||
// Render one IYUV frame stretched over the whole window, paced by the
// refresh thread's events. Returns -1 when BREAK_EVENT was received, 0
// otherwise.
int SDL2Player::PlayYUV(uint8_t *buffer, int delayTime)
{

    SDL_WaitEvent(&event);
    if (event.type == REFRESH_EVENT)
    {
        // Pitch is d_width: the buffer must be tightly packed IYUV.
        SDL_UpdateTexture(sdl_texture, NULL, buffer, d_width);
        sdlRect.x = 0;
        sdlRect.y = 0;
        sdlRect.w = d_width;
        sdlRect.h = d_height;

        SDL_RenderClear(sdl_rander);
        SDL_RenderCopy(sdl_rander, sdl_texture, NULL, &sdlRect);
        SDL_RenderPresent(sdl_rander);
        // Delay 40ms
        sd_time = delayTime; // feeds back into the refresh thread's period
    }
    else if (event.type == SDL_QUIT)
    {
        threadExit = 1;
    }
    else if (event.type == SDL_WINDOWEVENT)
    {
        // If Resize
        SDL_GetWindowSize(sdl_window, &d_width, &d_height);
    }
    else if (event.type == BREAK_EVENT)
    {
        return -1;
    }
    return 0;
}
||||
// Timer thread body: pushes a REFRESH_EVENT every sd_time milliseconds until
// st_exit is set, then pushes a final BREAK_EVENT so the render loop can
// bail out. NOTE(review): while st_pause is set the loop spins without any
// delay (busy-wait).
int SDL2Player::refresh_video_thread(void *opaque)
{
    SDL2Player *sdl = (SDL2Player *)opaque;
    SDL_Event _event;
    while (!sdl->st_exit)
    {
        if (!sdl->st_pause)
        {
            _event.type = REFRESH_EVENT;
            SDL_PushEvent(&_event);
            SDL_Delay(sdl->sd_time);
        }
    }
    _event.type = BREAK_EVENT;
    SDL_PushEvent(&_event);
    return 0;
}
@ -1,64 +0,0 @@
|
||||
#ifndef SDL2PLAYER_H
#define SDL2PLAYER_H

#include <iostream>
#include <fstream>
#include <QWidget>
#include <QDir>

// Prevent SDL from redefining main() on platforms where it normally does.
#define SDL_MAIN_HANDLED

#include "SDL2/SDL.h"

#include "stb_ffmpeg.h"

//Refresh Event
#define REFRESH_EVENT (SDL_USEREVENT + 1)
//Break
#define BREAK_EVENT (SDL_USEREVENT + 2)

// What the player is currently showing.
enum SDL2ScreenMode{
    DEFAULT_MODE, // static logo + caption
    VOIDE_MODE    // video frames (NOTE(review): likely a typo for VIDEO_MODE)
};
// Minimal SDL2-based video player: embeds into a native window handle, runs
// a timer thread that pushes REFRESH_EVENTs, and blits IYUV frames.
class SDL2Player
{
public:
    SDL2Player();
    ~SDL2Player();
    // Create the SDL window from a native handle and start the refresh thread.
    int initPlayer(void* winId);
    void set_window_size(int t_width,int t_height);
    void set_player_title(QString);
    // Rebuild renderer/texture for logo vs. video display.
    void switch_screen_texture(SDL2ScreenMode sdl2Type);
    // Draw the idle/logo screen once.
    void default_screen_texture();
    // Render one IYUV frame; returns -1 on BREAK_EVENT, 0 otherwise.
    int PlayYUV(uint8_t* buffer, int delayTime = 40);
    int PlayYUV(uint8_t* buffer, SDL_Rect sdlRect, int delayTime);
    int getWidth();
    int getHeight();
private:
    // Timer thread body: pushes REFRESH_EVENT periodically until st_exit.
    static int refresh_video_thread(void* opaque);
private:
    int d_width;
    int d_height;

    QString d_Title;

    STB_FFmpeg* stbffmpeg;
    //SDL WINDOW
    SDL_Window* sdl_window;
    SDL_Texture* sdl_texture;
    SDL_Renderer* sdl_rander;
    SDL_Surface *sdl_surface;
    SDL_Rect sdlRect,sdlFontRect;
    // event (set by SDL_WaitEvent in the render methods)
    SDL_Event event;
    int threadExit;
    int st_exit;  // 1 -> refresh thread should stop
    int st_pause; // 1 -> refresh thread stops pushing events
    int sd_time;  // delay between refresh events, in ms
    // default resources (logo image path and caption string)
    std::string logoImageStr="F:/SourceCode/VTS/ZFFmpeg/ZFFmpeg/res/img/zvo.png";
    std::string logoStr="新时代社会主义中国";
};

#endif // SDL2PLAYER_H
|
@ -0,0 +1,95 @@
|
||||
#include "SDL2RenderWidget.h" |
||||
|
||||
// Take over this Qt widget's native window with SDL: init the SDL
// subsystems, wrap the widget's window handle, and create a renderer on it.
SDL2RenderWidget::SDL2RenderWidget(QWidget *parent /*= nullptr*/)
    :QWidget(parent)
{

    //setUpdatesEnabled(false);
    char winID[32] = { 0 };        // NOTE(review): unused
    QSize size = this->baseSize(); // NOTE(review): unused

    // B1. Initialise the SDL subsystems: defaults (events, file IO, threads)
    // plus video, audio and timers.
    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER) < 0)
    {
        printf("SDL could not initialize! SDL_Error: %s\n", SDL_GetError());
    }

    // B2. Create the SDL window from this widget's native handle
    // (SDL 2.0 supports multiple windows).
    m_sdl_window = SDL_CreateWindowFrom((void*)(this->winId()));


    // B3. Create the SDL_Renderer for that window.
    // NOTE(review): neither the window nor the renderer is checked for NULL.
    m_sdl_renderer = SDL_CreateRenderer(m_sdl_window, -1, 0);
}
||||
|
||||
// Release SDL resources in dependency order (texture -> renderer -> window),
// then shut SDL down. BUGFIX: the texture and renderer were previously
// leaked — only the window was destroyed.
SDL2RenderWidget::~SDL2RenderWidget()
{
    if (m_sdl_texture)
        SDL_DestroyTexture(m_sdl_texture);
    if (m_sdl_renderer)
        SDL_DestroyRenderer(m_sdl_renderer);
    // destroy the window
    SDL_DestroyWindow(m_sdl_window);
    // quit SDL subsystems
    SDL_Quit();
}
||||
|
||||
void SDL2RenderWidget::updateImage(std::shared_ptr<MediaAVFrame> yuv_frame) |
||||
{ |
||||
int nTextureWidth = 0, nTextureHeight = 0; |
||||
//首先查询当前纹理对象的宽高,如果不符合,那么需要重建纹理对象
|
||||
SDL_QueryTexture(m_sdl_texture, nullptr, nullptr, &nTextureWidth, &nTextureHeight); |
||||
setMinimumSize(nTextureWidth,nTextureHeight); |
||||
//B4 SDL_CreateTexture
|
||||
if (nTextureWidth != yuv_frame->m_frame->width || nTextureHeight != yuv_frame->m_frame->height) { |
||||
if (m_sdl_texture) |
||||
SDL_DestroyTexture(m_sdl_texture); |
||||
//这里指定了渲染的数据格式,访问方式和宽高大小
|
||||
m_sdl_texture = SDL_CreateTexture(m_sdl_renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, |
||||
yuv_frame->m_frame->width, yuv_frame->m_frame->height); |
||||
} |
||||
m_sdl_rect.x=0; |
||||
m_sdl_rect.y=0; |
||||
m_sdl_rect.w=nTextureWidth; |
||||
m_sdl_rect.w=nTextureHeight; |
||||
//B5.使用新的YUV像素数据更新SDL_Rect
|
||||
SDL_UpdateYUVTexture(m_sdl_texture, // sdl texture
|
||||
&m_sdl_rect, // sdl rect
|
||||
yuv_frame->m_frame->data[0], // y plane
|
||||
yuv_frame->m_frame->linesize[0], // y pitch
|
||||
yuv_frame->m_frame->data[1], // u plane
|
||||
yuv_frame->m_frame->linesize[1], // u pitch
|
||||
yuv_frame->m_frame->data[2], // v plane
|
||||
yuv_frame->m_frame->linesize[2] // v pitch
|
||||
); |
||||
|
||||
|
||||
// B6. 使用特定颜色清空当前渲染目标
|
||||
SDL_RenderClear(m_sdl_renderer); |
||||
|
||||
// B7. 使用部分图像数据(texture)更新当前渲染目标
|
||||
SDL_RenderCopy(m_sdl_renderer, // sdl renderer
|
||||
m_sdl_texture, // sdl texture
|
||||
NULL, // src rect, if NULL copy texture
|
||||
NULL // dst rect
|
||||
); |
||||
|
||||
// B8. 执行渲染,更新屏幕显示
|
||||
SDL_RenderPresent(m_sdl_renderer); |
||||
|
||||
// B9. 控制帧率为25FPS,此处不够准确,未考虑解码消耗的时间
|
||||
SDL_Delay(40); |
||||
} |
||||
|
||||
// Open the default audio output with the requested spec, allowing SDL to
// change only the sample format, and start playback immediately (unpaused).
// Returns 0 on failure.
SDL_AudioDeviceID SDL2RenderWidget::openAudioDevice(SDL_AudioSpec *spec)
{
    SDL_AudioSpec have;
    SDL_AudioDeviceID dev = SDL_OpenAudioDevice(NULL, 0, spec, &have, SDL_AUDIO_ALLOW_FORMAT_CHANGE);
    if (dev == 0) {
        SDL_Log("Failed to open audio: %s", SDL_GetError());
    }
    else {
        if (have.format != spec->format) { /* we let this one thing change. */
            SDL_Log("We didn't get Float32 audio format.");
        }
    }
    // NOTE(review): also called when dev == 0 (open failed); SDL ignores it.
    SDL_PauseAudioDevice(dev, 0);
    return dev;
}
@ -0,0 +1,24 @@
|
||||
#ifndef SDL2RENDERWIDGET_H
#define SDL2RENDERWIDGET_H
#include <QWidget>
#include <memory>
#include "MediaAVFrame.h"
#include "SDL2/SDL.h"


// Qt widget whose native window is taken over by SDL for video rendering,
// and which can open an SDL audio output device for playback.
class SDL2RenderWidget : public QWidget
{
    Q_OBJECT
public:
    SDL2RenderWidget(QWidget *parent = nullptr);
    ~SDL2RenderWidget();
    // Upload and present one decoded YUV420P frame.
    void updateImage(std::shared_ptr<MediaAVFrame> frame);
    // Open (and unpause) an audio device matching spec; returns 0 on failure.
    SDL_AudioDeviceID openAudioDevice(SDL_AudioSpec * spec);
private:
    SDL_Window* m_sdl_window = nullptr;     // wraps this widget's native window
    SDL_Renderer* m_sdl_renderer = nullptr;
    SDL_Texture* m_sdl_texture = nullptr;   // recreated whenever frame size changes
    SDL_Rect m_sdl_rect;                    // destination rect for YUV uploads
};

#endif
@ -1,275 +0,0 @@
|
||||
#include "ffmpeg.h" |
||||
|
||||
// Default-construct with every FFmpeg handle nulled. BUGFIX: the raw pointer
// members were previously left uninitialised, so destroying an FFmpeg object
// on which initFFmpeg() was never called passed garbage pointers to the
// free/close functions (undefined behaviour).
FFmpeg::FFmpeg()
{
    pFormatCtx = nullptr;
    pCodercCtxVideo = nullptr;
    pCodec_Video = nullptr;
    pCodercCtxAudio = nullptr;
    pCodec_Audio = nullptr;
    out_buffer_size = 0;
    out_buffer_audio = nullptr;
    pFrame = nullptr;
    pFrameYUV = nullptr;
    pkt = nullptr;
    img_convert_ctx = nullptr;
    aux_convert_ctx = nullptr;
    d_width = 0;
    d_height = 0;
}
||||
|
||||
// Construct with a default media path; initFFmpeg("") falls back to it.
// BUGFIX: the path argument was previously ignored, so the filePath fallback
// in initFFmpeg() never had anything to fall back to. Delegates to the
// default constructor so all members are initialised.
FFmpeg::FFmpeg(std::string path) : FFmpeg()
{
    filePath = std::move(path);
}
||||
|
||||
// Open the media at `path` (falling back to the stored filePath when `path`
// is empty), set up the video decoder plus a YUV420P conversion context, and
// the audio decoder plus resampling to S16 stereo played through SDL audio.
// Returns INIT_ERRO_NO_ERRO on success.
INITERRO FFmpeg::initFFmpeg(const std::string &path)
{

    if (path.empty() && filePath.empty())
    {
        return INITERRO::INIT_ERRO_OPEN_FAILED;
    }
    // Prefer the explicit argument over the stored default path.
    std::string pathTemp = path.empty() ? filePath : path;

    unsigned codecVer = avcodec_version();
    std::string FFMPEG_VERSION = "6.0";
    // BUGFIX: passing a std::string through printf's %s is undefined
    // behaviour; hand it the underlying C string.
    printf("FFmpeg version is: %s, avcodec version is: %d\n.", FFMPEG_VERSION.c_str(), codecVer);

    avformat_network_init();
    pFormatCtx = avformat_alloc_context();
    // open_input
    if (avformat_open_input(&pFormatCtx, pathTemp.c_str(), NULL, NULL) != 0)
    {
        return INITERRO::INIT_ERRO_OPEN_FAILED;
    }
    // find_stream_infomation
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
    {
        return INITERRO::INIT_ERRO_FIND_FAILED;
    }
    av_dump_format(pFormatCtx, 100, pathTemp.c_str(), false);
    videoIndex = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    audioIndex = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
    subtitleIndex = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_SUBTITLE, -1, -1, NULL, 0);
    std::cout << "videoIndex:" << videoIndex << std::endl;
    std::cout << "audioIndex:" << audioIndex << std::endl;
    // BUGFIX: this line was mislabelled "audioIndex" while printing the
    // subtitle index.
    std::cout << "subtitleIndex:" << subtitleIndex << std::endl;
    // find_decoder
    if (videoIndex < 0)
    {
        printf("Didn't find a video stream.\n");
    }
    else
    {
        pCodercCtxVideo = avcodec_alloc_context3(NULL);
    }

    if (audioIndex < 0)
    {
        printf("Didn't find a audio stream.\n");
    }
    else
    {
        pCodercCtxAudio = avcodec_alloc_context3(NULL);
    }

    if (!pCodercCtxVideo)
    {
        printf("Could not allocate AVCodecContext\n");
    }
    else
    {
        avcodec_parameters_to_context(pCodercCtxVideo, pFormatCtx->streams[videoIndex]->codecpar);
        pCodec_Video = (AVCodec *)avcodec_find_decoder(pCodercCtxVideo->codec_id);
    }

    if (!pCodercCtxAudio)
    {
        printf("Could not allocate AVCodecContext\n");
    }
    else
    {
        avcodec_parameters_to_context(pCodercCtxAudio, pFormatCtx->streams[audioIndex]->codecpar);
        pCodec_Audio = (AVCodec *)avcodec_find_decoder(pCodercCtxAudio->codec_id);
    }

    if (!pCodec_Audio)
    {
        // BUGFIX: this branch reported "Video Codec not found" for the
        // AUDIO codec.
        printf("Audio Codec not found.\n");
    }
    else
    {
        if (avcodec_open2(pCodercCtxAudio, pCodec_Audio, NULL) < 0)
        {
            printf("Could not open codec.\n");
        }
    }

    if (!pCodec_Video)
    {
        printf("Video Codec not found.\n");
    }
    else
    {
        if (avcodec_open2(pCodercCtxVideo, pCodec_Video, NULL) < 0)
        {
            printf("Could not open codec.\n");
        }
    }
    //========================================
    // Video Stream
    //========================================
    pFrameYUV = av_frame_alloc();
    pFrame = av_frame_alloc();
    // BUGFIX: the packet was av_malloc'ed and set up with the deprecated
    // av_init_packet(); av_packet_alloc() allocates and initialises it
    // correctly in one call.
    pkt = av_packet_alloc();

    // Backing buffer for the converted YUV420P frame (not owned by the frame).
    uint8_t *out_buffer_video = (uint8_t *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodercCtxVideo->width, pCodercCtxVideo->height, 1));
    av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer_video, AV_PIX_FMT_YUV420P, pCodercCtxVideo->width, pCodercCtxVideo->height, 1);
    // BUGFIX: the old code allocated a context with sws_alloc_context() and
    // immediately overwrote the pointer with sws_getContext(), leaking it.
    img_convert_ctx = sws_getContext(pCodercCtxVideo->width, pCodercCtxVideo->height, pCodercCtxVideo->pix_fmt,
                                     pCodercCtxVideo->width, pCodercCtxVideo->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
    std::cout << "pix_fmt:" << pCodercCtxVideo->pix_fmt << std::endl;
    std::cout << "width:" << pCodercCtxVideo->width << " height:" << pCodercCtxVideo->height << std::endl;
    printf("init video done!\n");

    //=====================================
    // Audio Stream
    //=====================================
    // Out Audio Param
    uint64_t out_channel_layout = AV_CH_LAYOUT_STEREO;
    // nb_samples: AAC-1024 MP3-1152
    int out_nb_samples = pCodercCtxAudio->frame_size;
    AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
    int out_sample_rate = 44100;
    int out_channels = av_get_channel_layout_nb_channels(out_channel_layout);
    // Out Buffer Size
    out_buffer_size = av_samples_get_buffer_size(NULL, out_channels, out_nb_samples, out_sample_fmt, 1);

    out_buffer_audio = (uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE * 2);
    // SDL------------------

    // SDL_AudioSpec: pull-mode playback through audioCallBack().
    SDL_AudioSpec wanted_spec;
    wanted_spec.freq = out_sample_rate;
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.channels = out_channels;
    wanted_spec.silence = 0;
    wanted_spec.samples = out_nb_samples;
    wanted_spec.callback = audioCallBack;
    wanted_spec.userdata = pCodercCtxAudio;

    if (SDL_OpenAudio(&wanted_spec, NULL) < 0)
    {
        printf("can't open audio.\n");
    }

    // FIX:Some Codec's Context Information is missing
    int64_t in_channel_layout = av_get_default_channel_layout(pCodercCtxAudio->channels);
    // Swr: resample decoder output to S16 stereo @ 44100 Hz.
    aux_convert_ctx = swr_alloc();
    aux_convert_ctx = swr_alloc_set_opts(aux_convert_ctx, out_channel_layout, out_sample_fmt, out_sample_rate,
                                         in_channel_layout, pCodercCtxAudio->sample_fmt, pCodercCtxAudio->sample_rate, 0, NULL);
    swr_init(aux_convert_ctx);

    // Play
    SDL_PauseAudio(0);

    return INITERRO::INIT_ERRO_NO_ERRO;
}
||||
// Shared buffer state between GetFrameYUV() (producer) and audioCallBack()
// (consumer, runs on the SDL audio thread).
// NOTE(review): accessed from two threads without synchronisation.
static int audio_len;
static Uint8 *audio_pos;
||||
void FFmpeg::audioCallBack(void *, Uint8 *stream, int len) |
||||
{ |
||||
SDL_memset(stream, 0, len); |
||||
if (audio_len == 0) |
||||
return; |
||||
|
||||
len = (len > audio_len ? audio_len : len); /* Mix as much data as possible */ |
||||
|
||||
SDL_MixAudio(stream, audio_pos, len, SDL_MIX_MAXVOLUME); |
||||
audio_pos += len; |
||||
audio_len -= len; |
||||
} |
||||
// Record display dimensions. NOTE(review): d_width/d_height are not read by
// the decode path in this file; GetWidth()/GetHeight() report the codec size.
void FFmpeg::setSize(int _width, int _height)
{
    d_width = _width;
    d_height = _height;
}
||||
std::shared_ptr<uint8_t> FFmpeg::GetFrameYUV(int &isVideo) |
||||
{ |
||||
int y_size = pCodercCtxVideo->width * pCodercCtxVideo->height; |
||||
std::shared_ptr<uint8_t> bufferFrame(new uint8_t[y_size * 3 / 2]); |
||||
int ret = -1; |
||||
int got_picture = -1; |
||||
if (av_read_frame(pFormatCtx, pkt) < 0) |
||||
{ |
||||
isVideo = -2; |
||||
return bufferFrame; |
||||
} |
||||
if (pkt->stream_index == videoIndex) |
||||
{ |
||||
ret = avcodec_send_packet(pCodercCtxVideo, pkt); |
||||
if (ret != 0) |
||||
{ |
||||
printf("Decode Error.\n"); |
||||
} |
||||
got_picture = avcodec_receive_frame(pCodercCtxVideo, pFrame); |
||||
if (!got_picture) |
||||
{ |
||||
sws_scale(img_convert_ctx, (const uint8_t *const *)pFrame->data, pFrame->linesize, 0, |
||||
pCodercCtxVideo->height, pFrameYUV->data, pFrameYUV->linesize); |
||||
|
||||
memcpy(bufferFrame.get(), pFrameYUV->data[0], y_size); |
||||
memcpy(bufferFrame.get() + y_size, pFrameYUV->data[1], y_size / 4); |
||||
memcpy(bufferFrame.get() + y_size + y_size / 4, pFrameYUV->data[2], y_size / 4); |
||||
isVideo = 1; |
||||
} |
||||
else |
||||
{ |
||||
isVideo = -1; |
||||
} |
||||
} |
||||
else if (pkt->stream_index == audioIndex) |
||||
{ |
||||
ret = avcodec_send_packet(pCodercCtxAudio, pkt); |
||||
if (ret != 0) |
||||
{ |
||||
printf("Error in decoding audio frame.\n"); |
||||
} |
||||
got_picture = avcodec_receive_frame(pCodercCtxAudio, pFrame); |
||||
if (!got_picture) |
||||
{ |
||||
swr_convert(aux_convert_ctx, &out_buffer_audio, MAX_AUDIO_FRAME_SIZE, (const uint8_t **)pFrame->data, pFrame->nb_samples); |
||||
while (audio_len > 0) |
||||
{ // Wait until finish
|
||||
SDL_Delay(1); |
||||
} |
||||
// Set audio buffer (PCM data)
|
||||
// Audio buffer length
|
||||
audio_len = out_buffer_size; |
||||
audio_pos = (Uint8 *)out_buffer_audio; |
||||
isVideo = 1; |
||||
} |
||||
else |
||||
{ |
||||
isVideo = -1; |
||||
} |
||||
} |
||||
else |
||||
{ |
||||
isVideo = 1; |
||||
} |
||||
av_packet_unref(pkt); |
||||
return bufferFrame; |
||||
} |
||||
|
||||
// Native width of the decoded video; valid only after initFFmpeg() succeeded.
int FFmpeg::GetWidth()
{
    return pCodercCtxVideo->width;
}
||||
|
||||
// Native height of the decoded video; valid only after initFFmpeg() succeeded.
int FFmpeg::GetHeight()
{
    return pCodercCtxVideo->height;
}
||||
|
||||
// Tear down decode/convert state. BUGFIX: avcodec_close() only closed the
// codec and leaked the context allocated with avcodec_alloc_context3();
// avcodec_free_context() closes AND frees it (and tolerates NULL).
// NOTE(review): pkt and out_buffer_audio allocated in initFFmpeg() are still
// leaked here; freeing them safely requires the members to be
// null-initialised in the constructors first.
FFmpeg::~FFmpeg()
{
    swr_free(&aux_convert_ctx);
    sws_freeContext(img_convert_ctx);

    av_frame_free(&pFrameYUV);
    av_frame_free(&pFrame);
    avcodec_free_context(&pCodercCtxVideo);
    avcodec_free_context(&pCodercCtxAudio);
    avformat_close_input(&pFormatCtx);
}
@ -1,47 +0,0 @@
|
||||
#ifndef FFMPEG_H
#define FFMPEG_H
#include "ffmpegex.h"
#include <string>
#include <memory>
#include <iostream>
#include "SDL2/SDL.h"

#define MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio

// One-class FFmpeg demux/decode helper: opens a media file, converts video
// frames to packed YUV420P buffers (GetFrameYUV) and pushes resampled audio
// to SDL through a pull callback.
class FFmpeg
{
public:
    FFmpeg();
    FFmpeg(std::string path);
    // Open `path` (or the stored path) and set up decoders, swscale and SDL audio.
    INITERRO initFFmpeg(const std::string& path);
    // Demux+decode one packet; see the .cpp for the isVideo out-param protocol.
    std::shared_ptr<uint8_t> GetFrameYUV(int& isVideo);
    int GetWidth();
    int GetHeight();
    ~FFmpeg();
    // SDL pull-audio callback (static: uses file-level globals, no instance state).
    static void audioCallBack(void *,Uint8 *stream,int len);
    void setSize(int _width,int _height);
private:
    std::string filePath; // default media path used when initFFmpeg("") is called
    int d_width;
    int d_height;
    int videoIndex=-1;    // stream indices resolved by av_find_best_stream
    int audioIndex=-1;
    int subtitleIndex=-1;

    AVFormatContext* pFormatCtx;

    AVCodecContext* pCodercCtxVideo;
    AVCodec *pCodec_Video;

    AVCodecContext* pCodercCtxAudio;
    AVCodec *pCodec_Audio;
    int out_buffer_size;       // bytes of one resampled audio chunk
    uint8_t *out_buffer_audio; // resampled PCM handed to the SDL callback

    AVFrame* pFrame;           // raw decoded frame
    AVFrame* pFrameYUV;        // YUV420P-converted frame
    AVPacket* pkt;
    SwsContext* img_convert_ctx; // pixel-format conversion
    SwrContext* aux_convert_ctx; // audio resampling
};
#endif
@ -1,29 +0,0 @@
|
||||
#ifndef FFMPEGEX_H
#define FFMPEGEX_H
// Required by some FFmpeg headers for the INT64_C family of macros.
#define __STDC_CONSTANT_MACROS
#ifdef __cplusplus
extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavutil/imgutils.h"
#include "libavutil/time.h"
#ifdef __cplusplus
}
#endif

// Result codes for FFmpeg::initFFmpeg ("ERRO" spelling is kept — it is part
// of the public interface).
enum class INITERRO {
    INIT_ERRO_NO_ERRO,     // success
    INIT_ERRO_ALLOC_FAILED,
    INIT_ERRO_OPEN_FAILED, // empty path or avformat_open_input failed
    INIT_ERRO_FIND_FAILED, // avformat_find_stream_info failed
    INIT_ERRO_NO_VIDEO,
    INIT_ERRO_NO_AUDIO,
    INIT_ERRO_DECODE_FAILED,
    INIT_ERRO_OPEN2_FAILED
};

#endif
@ -1,74 +1,698 @@
|
||||
#include "mediaplayer.h" |
||||
#include "MediaPlayer.h" |
||||
|
||||
MediaPlayer::MediaPlayer(void *n_winID) : m_ffmpeg(new FFmpeg), |
||||
sdl2_player(new SDL2Player), |
||||
f_Path(tr(nullptr)) |
||||
MediaPlayer::MediaPlayer() : m_AVFormatCtx(nullptr), |
||||
m_video_AVCodecContext(nullptr), |
||||
m_video_AVStream(nullptr), |
||||
m_audio_AVCodecContext(nullptr), |
||||
m_audio_AVStream(nullptr), |
||||
m_render_receive_obj(nullptr) |
||||
{ |
||||
m_winID = n_winID; |
||||
sdl2_player->set_player_title("SDL Player"); |
||||
} |
||||
|
||||
// NOTE(review): the destructor neither stops the worker threads nor closes
// an opened file; callers must call StopPlay()/CloseFile() explicitly before
// the object is destroyed.
MediaPlayer::~MediaPlayer()
{
}
||||
|
||||
void MediaPlayer::run() |
||||
short format_convert(int format) |
||||
{ |
||||
short sdl_format = false; |
||||
switch (format) |
||||
{ |
||||
case AV_SAMPLE_FMT_U8: |
||||
case AV_SAMPLE_FMT_U8P: |
||||
sdl_format = AUDIO_U8; |
||||
break; |
||||
case AV_SAMPLE_FMT_S16: |
||||
case AV_SAMPLE_FMT_S16P: |
||||
sdl_format = AUDIO_S16SYS; |
||||
break; |
||||
case AV_SAMPLE_FMT_S32: |
||||
case AV_SAMPLE_FMT_S32P: |
||||
sdl_format = AUDIO_S32SYS; |
||||
break; |
||||
case AV_SAMPLE_FMT_FLT: |
||||
case AV_SAMPLE_FMT_FLTP: |
||||
case AV_SAMPLE_FMT_DBL: |
||||
case AV_SAMPLE_FMT_DBLP: |
||||
sdl_format = AUDIO_F32SYS; |
||||
break; |
||||
default: |
||||
printf("err fmt!=%d\n", format); |
||||
return false; |
||||
} |
||||
return sdl_format; |
||||
} |
||||
|
||||
// Deep-copy a frame's PCM into a contiguous malloc'ed buffer, interleaving
// planar layouts ([LLL...][RRR...] -> [LRLR...]). The caller owns and must
// free() the returned buffer. Returns NULL for unknown sample formats.
// NOTE(review): malloc results are not checked before memcpy.
char *av_pcm_clone(AVFrame *frame)
{
    assert(NULL != frame);

    int32_t bytes_per_sample = av_get_bytes_per_sample((enum AVSampleFormat)frame->format);
    char *p_cur_ptr = NULL, *pcm_data = NULL;
    if (bytes_per_sample <= 0)
        return NULL;

    int32_t frame_size = frame->channels * frame->nb_samples * bytes_per_sample;

    // 1.For packet sample foramt we just store pcm data in byte order.
    if (!av_sample_fmt_is_planar((enum AVSampleFormat)frame->format))
    { // linesize[0] maybe 0 or has padding bits,so calculate the real size by ourself.

        p_cur_ptr = pcm_data = (char *)malloc(frame_size);
        memcpy(p_cur_ptr, frame->data[0], frame_size);
    }
    else
    { // 2.For plane sample foramt, we must store pcm datas interleaved. [LRLRLR...LR].
        p_cur_ptr = pcm_data = (char *)malloc(frame_size);
        for (int i = 0; i < frame->nb_samples; ++i) // one sample per channel per iteration
        {
            for (int j = 0; j < frame->channels; j++)
            {
                memcpy(p_cur_ptr, frame->data[j] + i * bytes_per_sample, bytes_per_sample);
                p_cur_ptr += bytes_per_sample;
            }
        }
    }
    return pcm_data;
}
||||
// Free the demuxer context and reset the member so a new file can be opened.
void MediaPlayer::FreeAVFormatCtx()
{
    avformat_free_context(m_AVFormatCtx);
    m_AVFormatCtx = nullptr;
}
||||
bool MediaPlayer::OpenDecoder(AVCodecContext **codecContext, enum AVMediaType type) |
||||
{ |
||||
int stream_index = av_find_best_stream(m_AVFormatCtx, type, -1, -1, NULL, 0); |
||||
if (stream_index < 0) |
||||
{ |
||||
return false; |
||||
} |
||||
|
||||
// SDL 播放器
|
||||
sdl2_player->initPlayer(m_winID); |
||||
int isVideo = -2; |
||||
sdl2_player->switch_screen_texture(DEFAULT_MODE); |
||||
while (true) |
||||
AVStream *stream = m_AVFormatCtx->streams[stream_index]; |
||||
if (!stream) |
||||
{ |
||||
printf("stream is empty\n"); |
||||
return false; |
||||
} |
||||
if (type == AVMEDIA_TYPE_AUDIO) |
||||
{ |
||||
m_audio_AVStream = stream; |
||||
} |
||||
else if (type == AVMEDIA_TYPE_VIDEO) |
||||
{ |
||||
if (!state) |
||||
{
|
||||
sdl2_player->default_screen_texture(); |
||||
m_video_AVStream = stream; |
||||
} |
||||
|
||||
AVCodec *dec = (AVCodec *)avcodec_find_decoder(stream->codecpar->codec_id); |
||||
if (!dec) |
||||
{ |
||||
fprintf(stderr, "Failed to find %s codec\n", av_get_media_type_string(type)); |
||||
return false; |
||||
} |
||||
else |
||||
{ |
||||
*codecContext = avcodec_alloc_context3(dec); |
||||
if ((avcodec_parameters_to_context(*codecContext, stream->codecpar)) < 0) |
||||
{ |
||||
printf("Failed to copy %s codec parameters to decoder context\n"); |
||||
return false; |
||||
} |
||||
else |
||||
// AVFrame生命周期不由编码器关闭,由用户自己管理。如果不设置这个选项,一些frame数据将会丢失导致画面或者声音异常
|
||||
AVDictionary *opts = NULL; |
||||
av_dict_set(&opts, "refcounted_frames", "1", 0); |
||||
if (avcodec_open2(*codecContext, dec, &opts) < 0) |
||||
{ |
||||
printf("Failed to open %s codec\n"); |
||||
return false; |
||||
} |
||||
} |
||||
return true; |
||||
} |
||||
// Open `filename`, probe its streams, and set up the video decoder (plus a
// swscale context to YUV420P at native size) and the audio decoder (plus an
// SDL audio device opened on the render widget). Returns false on failure.
bool MediaPlayer::OpenFile(const char *filename)
{
    printf("avformat_open_input file:%s\n", filename);
    if (m_AVFormatCtx)
    {
        FreeAVFormatCtx();
    }
    // Allocate the container/demuxer context.
    m_AVFormatCtx = avformat_alloc_context();
    // Open the media file.
    if (avformat_open_input(&m_AVFormatCtx, filename, NULL, NULL) < 0)
    {
        FreeAVFormatCtx();
        return false;
    }

    // NOTE(review): dumping before avformat_find_stream_info() means stream
    // details may be incomplete in this printout.
    av_dump_format(m_AVFormatCtx, 0, filename, 0);
    // Probe stream information.
    if (avformat_find_stream_info(m_AVFormatCtx, NULL) < 0)
    {
        printf("Could not find stream information\n");
        FreeAVFormatCtx();
        return false;
    }
    // Find and open the video decoder.
    if (!OpenDecoder(&m_video_AVCodecContext, AVMEDIA_TYPE_VIDEO))
    {
        avcodec_free_context(&m_video_AVCodecContext);
        printf("open video decoder error\n");
        return false;
    }
    else
    {
        // Convert whatever the decoder outputs to YUV420P at native size.
        m_sws_ctx = sws_getContext(
            m_video_AVCodecContext->width,
            m_video_AVCodecContext->height,
            m_video_AVCodecContext->pix_fmt,
            m_video_AVCodecContext->width,
            m_video_AVCodecContext->height,
            AV_PIX_FMT_YUV420P,
            SWS_POINT,
            NULL,
            NULL, // dst filter
            NULL  // param
        );
    }
    // Find and open the audio decoder.
    if (!OpenDecoder(&m_audio_AVCodecContext, AVMEDIA_TYPE_AUDIO))
    {
        avcodec_free_context(&m_audio_AVCodecContext);
        printf("open audio decoder error\n");
        return false;
    }
    else
    {
        // NOTE(review): spec is not zero-initialised and userdata is never
        // set; only safe because callback is NULL (push mode).
        SDL_AudioSpec spec;
        spec.freq = m_audio_AVCodecContext->sample_rate;
        spec.channels = (Uint8)m_audio_AVCodecContext->channels;
        spec.format = format_convert(m_audio_AVCodecContext->sample_fmt);
        spec.silence = (Uint8)0;
        spec.samples = m_audio_AVCodecContext->frame_size;
        spec.callback = NULL; // push mode: audio is queued, no pull callback

        // Start SDL audio rendering.
        // NOTE(review): m_render_receive_obj is nullptr after construction —
        // confirm it is always assigned before OpenFile() is called, else
        // this dereference crashes.
        m_current_audio_deviceId = m_render_receive_obj->openAudioDevice(&spec);
        m_current_aduio_render_time = (double)av_gettime() / 1000000.0;
    }
    printf("open file done\n");
    return true;
}
||||
void MediaPlayer::CloseFile() |
||||
{ |
||||
avformat_close_input(&m_AVFormatCtx); |
||||
} |
||||
bool MediaPlayer::StartPlay(const char *file_name) |
||||
{ |
||||
StopPlay(); |
||||
if (!OpenFile(file_name)) |
||||
{ |
||||
return false; |
||||
} |
||||
m_stop = false; |
||||
// 启动线程
|
||||
m_demux_thread = std::thread(&MediaPlayer::DemuxThread, this); |
||||
m_audio_decode_thread = std::thread(&MediaPlayer::AudioDecodeThread, this); |
||||
m_video_decode_thread = std::thread(&MediaPlayer::VideoDecodeThread, this); |
||||
m_video_render_thread = std::thread(&MediaPlayer::RenderAudioThread, this); |
||||
m_audio_render_thread = std::thread(&MediaPlayer::RenderVideoThread, this); |
||||
m_theoretical_render_video_time = m_theoretical_render_audio_time = (double)av_gettime() / 1000000.0; // 初始化时钟(获取当前系统时间,然后根据pts叠加到该时间上的方式进行同步)
|
||||
return true; |
||||
} |
||||
bool MediaPlayer::StopPlay() |
||||
{ |
||||
m_stop = true; |
||||
m_pause_condition_variable.notify_all(); |
||||
if (m_demux_thread.joinable()) |
||||
{ |
||||
m_demux_thread.join(); |
||||
} |
||||
if (m_video_decode_thread.joinable()) |
||||
{ |
||||
m_pkt_vidoe_condition_variable.notify_all(); |
||||
m_video_decode_thread.join(); |
||||
} |
||||
|
||||
if (m_video_render_thread.joinable()) |
||||
{ |
||||
m_frame_video_condition_varible.notify_all(); |
||||
m_video_render_thread.join(); |
||||
} |
||||
|
||||
if (m_audio_decode_thread.joinable()) |
||||
{ |
||||
m_pkt_audio_condition_variable.notify_all(); |
||||
m_audio_decode_thread.join(); |
||||
} |
||||
|
||||
if (m_audio_render_thread.joinable()) |
||||
{ |
||||
m_audio_render_thread.join(); |
||||
} |
||||
m_audio_frame_queue.clear(); |
||||
m_video_frame_queue.clear(); |
||||
m_video_frame_queue.clear(); |
||||
m_audio_packet_queue.clear(); |
||||
m_playStatus = PlayStatus::Stop; |
||||
CloseFile(); |
||||
return true; |
||||
} |
||||
void MediaPlayer::PauseResumePlay() |
||||
{ |
||||
if (PlayStatus::Pause == m_playStatus) |
||||
{ |
||||
m_playStatus = PlayStatus::Playing; |
||||
m_pause_condition_variable.notify_all(); |
||||
SDL_PauseAudioDevice(m_current_audio_deviceId, 0); |
||||
} |
||||
|
||||
else |
||||
{ |
||||
m_playStatus = PlayStatus::Pause; |
||||
SDL_PauseAudioDevice(m_current_audio_deviceId, 1); |
||||
} |
||||
} |
||||
void MediaPlayer::GetVideoSize(int &width, int &height) |
||||
{ |
||||
if (m_video_AVCodecContext) |
||||
{ |
||||
width = m_video_AVCodecContext->width; |
||||
height = m_video_AVCodecContext->height; |
||||
} |
||||
} |
||||
void MediaPlayer::registerRenderWindowsCallback(SDL2RenderWidget *receiver)
{
    // Remember the widget that receives decoded frames and owns the SDL
    // audio device.  Not thread-safe: call before StartPlay().
    m_render_receive_obj = receiver;
}
||||
void MediaPlayer::PauseOrResume() |
||||
{ |
||||
std::unique_lock<std::mutex> locker(m_pause_mutex); |
||||
m_pause_condition_variable.wait(locker, [this]() |
||||
{ |
||||
if (PlayStatus::Pause == m_playStatus) |
||||
{ |
||||
printf("demux_thread m_pause_condition_variable PlayStatus::Pause\n"); |
||||
return false;//阻塞线程
|
||||
} |
||||
else |
||||
{ |
||||
return true;//通过notify唤醒wait时,需要再次执行lamada,当lamada返回true才可以继续执行,否则,继续阻塞
|
||||
} }); |
||||
} |
||||
void MediaPlayer::PushPkt2Queue(std::shared_ptr<MediaAVPacket> pkt) |
||||
{ |
||||
auto codecId = m_AVFormatCtx->streams[pkt->m_pakcet->stream_index]->codecpar->codec_type; |
||||
if (codecId == AVMEDIA_TYPE_VIDEO) |
||||
{ |
||||
/*
|
||||
time_t t = time(0); |
||||
char ch[64]; |
||||
strftime(ch, sizeof(ch), "%Y-%m-%d %H-%M-%S", localtime(&t)); //年-月-日 时-分-秒
|
||||
printf("Time:%s【demux_thread】-----m_video_packet_queue.push(pkt);\n",ch); |
||||
*/ |
||||
std::lock_guard<std::mutex> locker(m_pkt_video_queue_mutex); |
||||
m_video_packet_queue.push_back(pkt); |
||||
m_pkt_vidoe_condition_variable.notify_one(); |
||||
} |
||||
else if (codecId == AVMEDIA_TYPE_AUDIO) |
||||
{ |
||||
/*
|
||||
time_t t = time(0); |
||||
char ch[64]; |
||||
strftime(ch, sizeof(ch), "%Y-%m-%d %H-%M-%S", localtime(&t)); //年-月-日 时-分-秒
|
||||
printf("【demux_thread】-----m_audio_packet_queue.push(pkt);\n",ch); |
||||
*/ |
||||
std::lock_guard<std::mutex> locker(m_pkt_audio_queue_mutex); |
||||
m_audio_packet_queue.push_back(pkt); |
||||
m_pkt_audio_condition_variable.notify_one(); |
||||
} |
||||
} |
||||
void MediaPlayer::DemuxThread() |
||||
{ |
||||
while (!m_stop) |
||||
{ |
||||
PauseOrResume(); |
||||
|
||||
// 这段需要确保音视频同步后,才能正常工作。否则由于packet不够导致无法正常解码播放
|
||||
// printf("video packet queue size: %d, video frame queue size:%d, audio packet queue size:%d,audio frame queue size :%d\n",
|
||||
// m_video_packet_queue.size(), m_video_frame_queue.size(), m_audio_packet_queue.size(), m_audio_frame_queue.size());
|
||||
|
||||
{ |
||||
|
||||
if (!ffmpegstate) |
||||
std::lock_guard<std::mutex> locker(m_pkt_video_queue_mutex); |
||||
if (m_video_frame_queue.size() > 0) |
||||
{ |
||||
INITERRO r = m_ffmpeg->initFFmpeg(f_Path.toStdString().c_str()); |
||||
sdl2_player->set_window_size(m_ffmpeg->GetWidth(),m_ffmpeg->GetHeight());
|
||||
ffmpegstate = true;
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(1)); |
||||
continue; |
||||
} |
||||
|
||||
auto buffer = m_ffmpeg->GetFrameYUV(isVideo);
|
||||
while (isVideo != -2) |
||||
{
|
||||
m_ffmpeg->setSize(sdl2_player->getWidth(),sdl2_player->getHeight()); |
||||
buffer = m_ffmpeg->GetFrameYUV(isVideo);
|
||||
if (isVideo == 1) |
||||
{ |
||||
auto r = sdl2_player->PlayYUV(buffer.get()); |
||||
if (r == -1) |
||||
{ |
||||
break; |
||||
} |
||||
} |
||||
} |
||||
|
||||
{ |
||||
std::lock_guard<std::mutex> locker(m_pkt_audio_queue_mutex); |
||||
if (m_audio_frame_queue.size() > 0) |
||||
{ |
||||
std::this_thread::sleep_for(std::chrono::milliseconds(1)); |
||||
continue; |
||||
} |
||||
}
|
||||
SDL_Delay(25); |
||||
} |
||||
|
||||
std::shared_ptr<MediaAVPacket> packet = std::make_shared<MediaAVPacket>(); |
||||
int result = av_read_frame(m_AVFormatCtx, packet->m_pakcet); |
||||
if (result < 0) |
||||
{ |
||||
if (result == AVERROR_EOF) |
||||
{ |
||||
printf("file read end\n"); |
||||
break; |
||||
} |
||||
} |
||||
PushPkt2Queue(packet); |
||||
std::this_thread::sleep_for(std::chrono::milliseconds(1)); |
||||
} |
||||
std::cout << "over!" << std::endl; |
||||
|
||||
m_demux_finish = true; |
||||
|
||||
printf("[finished]:void MediaPlayer::demux_thread()\n"); |
||||
} |
||||
|
||||
void MediaPlayer::started() |
||||
void MediaPlayer::VideoDecode(std::shared_ptr<MediaAVPacket> video_pkt) |
||||
{ |
||||
sdl2_player->switch_screen_texture(VOIDE_MODE); |
||||
state = true; |
||||
int complete = 0; |
||||
do |
||||
{ |
||||
std::shared_ptr<MediaAVFrame> media_frame = std::make_shared<MediaAVFrame>(); |
||||
if (!media_frame->m_frame) |
||||
{ |
||||
continue; |
||||
} |
||||
int ret = avcodec_send_packet(m_video_AVCodecContext, video_pkt->m_pakcet); |
||||
if (ret < 0) |
||||
{ |
||||
break; // 跳出内循环
|
||||
} |
||||
else |
||||
{ |
||||
complete = avcodec_receive_frame(m_video_AVCodecContext, media_frame->m_frame); |
||||
if (complete) |
||||
{ |
||||
std::lock_guard<std::mutex> locker(m_video_frame_queue_mutex); |
||||
m_video_frame_queue.push_back(media_frame); |
||||
} |
||||
video_pkt->m_pakcet->data = video_pkt->m_pakcet->data + ret; |
||||
video_pkt->m_pakcet->size = video_pkt->m_pakcet->size - ret; |
||||
} |
||||
} while (video_pkt->m_pakcet->size > 0); |
||||
} |
||||
void MediaPlayer::pause() |
||||
void MediaPlayer::VideoDecodeThread() |
||||
{ |
||||
} |
||||
while (!m_stop) |
||||
{ |
||||
// deal pause operate
|
||||
PauseOrResume(); |
||||
std::shared_ptr<MediaAVPacket> video_pkt; |
||||
{ |
||||
|
||||
void MediaPlayer::stop() |
||||
std::unique_lock<std::mutex> video_pkt_que_loker(m_pkt_video_queue_mutex); |
||||
m_pkt_vidoe_condition_variable.wait(video_pkt_que_loker, [this]() |
||||
{ |
||||
if (!m_video_packet_queue.empty()) |
||||
{ |
||||
return true; |
||||
} |
||||
else |
||||
{ |
||||
if (m_demux_finish) |
||||
{ |
||||
m_video_decode_finish = true; |
||||
return true; |
||||
} |
||||
else |
||||
{ |
||||
return false; |
||||
} |
||||
} }); |
||||
// 视频解码完成
|
||||
if (m_video_decode_finish) |
||||
{ |
||||
printf("video_decode_thread thread finished..................................\n"); |
||||
// flush 视频解码器缓存
|
||||
std::shared_ptr<MediaAVPacket> pkt = std::make_shared<MediaAVPacket>(); |
||||
pkt->m_pakcet->data = NULL; |
||||
pkt->m_pakcet->size = 0; |
||||
VideoDecode(pkt); |
||||
break; |
||||
} |
||||
|
||||
video_pkt = m_video_packet_queue.front(); |
||||
m_video_packet_queue.pop_front(); |
||||
} |
||||
VideoDecode(video_pkt); |
||||
} |
||||
printf("[finished]:void MediaPlayer::video_decode_thread()\n"); |
||||
} |
||||
void MediaPlayer::AudioDecode(std::shared_ptr<MediaAVPacket> audio_pkt) |
||||
{ |
||||
int complete = 0; |
||||
do |
||||
{ |
||||
std::shared_ptr<MediaAVFrame> media_frame = std::make_shared<MediaAVFrame>(); |
||||
int ret = avcodec_send_packet(m_audio_AVCodecContext, audio_pkt->m_pakcet); |
||||
if (ret < 0) |
||||
{ |
||||
break; |
||||
} |
||||
complete = avcodec_receive_frame(m_audio_AVCodecContext, media_frame->m_frame); |
||||
if (complete) |
||||
{ |
||||
std::unique_lock<std::mutex> locker(m_audio_frame_queue_mutex); |
||||
m_audio_frame_queue.push_back(media_frame); |
||||
} |
||||
audio_pkt->m_pakcet->size = audio_pkt->m_pakcet->size - ret; |
||||
audio_pkt->m_pakcet->data = audio_pkt->m_pakcet->data + ret; |
||||
} while (audio_pkt->m_pakcet->size > 0); |
||||
} |
||||
void MediaPlayer::AudioDecodeThread() |
||||
{ |
||||
while (!m_stop) |
||||
{ |
||||
PauseOrResume(); |
||||
std::shared_ptr<MediaAVPacket> audio_pkt; |
||||
// get audio pkt frome queue
|
||||
{ |
||||
|
||||
std::unique_lock<std::mutex> audio_que_loker(m_pkt_audio_queue_mutex); |
||||
m_pkt_audio_condition_variable.wait(audio_que_loker, [this]() |
||||
{ |
||||
if (!m_audio_packet_queue.empty()) |
||||
{ |
||||
return true; |
||||
} |
||||
else |
||||
{ |
||||
if (m_demux_finish) |
||||
{ |
||||
m_audio_decode_finish = true; |
||||
return true; |
||||
} |
||||
else |
||||
{ |
||||
return false;//block
|
||||
} |
||||
|
||||
void MediaPlayer::setPath(QString filePath) |
||||
} }); |
||||
// 解码完成,退出解码线程
|
||||
if (m_audio_decode_finish) |
||||
{ |
||||
// 刷新解码器缓存
|
||||
std::shared_ptr<MediaAVPacket> FlushPkt = std::make_shared<MediaAVPacket>(); |
||||
FlushPkt->m_pakcet->data = NULL; |
||||
FlushPkt->m_pakcet->size = 0; |
||||
AudioDecode(FlushPkt); |
||||
printf("audio_decodec thread finished..................\n"); |
||||
break; |
||||
} |
||||
audio_pkt = m_audio_packet_queue.front(); |
||||
m_audio_packet_queue.pop_front(); |
||||
} |
||||
AudioDecode(audio_pkt); |
||||
} |
||||
printf("[finished]:void MediaPlayer::audio_decode_thread()\n"); |
||||
} |
||||
void MediaPlayer::RenderVideoThread() |
||||
{ |
||||
f_Path = filePath; |
||||
int frameSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, m_video_AVCodecContext->width, m_video_AVCodecContext->height, 1); |
||||
uint8_t *buffer = (uint8_t *)av_malloc(frameSize); // 指向YUV420的数据部分
|
||||
|
||||
static double m_previous_pts_diff = 40e-3; |
||||
static double m_previous_pts = 0; |
||||
static bool m_first_frame = true; |
||||
double delay_until_next_wake; |
||||
while (!m_stop) |
||||
{ |
||||
bool late_first_frame = false; |
||||
PauseOrResume(); |
||||
if (m_video_decode_finish) |
||||
{ |
||||
break; |
||||
} |
||||
|
||||
std::unique_lock<std::mutex> locker(m_video_frame_queue_mutex); |
||||
if (m_video_frame_queue.empty()) |
||||
{ |
||||
locker.unlock(); |
||||
std::this_thread::sleep_for(std::chrono::milliseconds(1)); |
||||
continue; |
||||
} |
||||
std::shared_ptr<MediaAVFrame> video_frame = m_video_frame_queue.front(); |
||||
m_video_frame_queue.pop_front(); |
||||
locker.unlock(); |
||||
|
||||
auto videoPts = video_frame->m_frame->best_effort_timestamp * av_q2d(m_video_AVStream->time_base); |
||||
double pts_diff = videoPts - m_previous_pts; |
||||
if (m_first_frame) |
||||
{ |
||||
late_first_frame = pts_diff >= 1.0; |
||||
m_first_frame = false; |
||||
} |
||||
if (pts_diff <= 0 || late_first_frame) |
||||
{ |
||||
pts_diff = m_previous_pts_diff; |
||||
} |
||||
m_previous_pts_diff = pts_diff; |
||||
m_previous_pts = videoPts; |
||||
|
||||
auto delay = av_gettime() - m_current_aduio_render_time; // 渲染到现在为止的延迟
|
||||
int currentPts = m_audio_current_pts + delay; // 当前音频的播放pts
|
||||
auto diff = videoPts - currentPts; // 【计算视频帧pts与当前音频帧pts的差值,使用这个值来判断是否要展示当前的视频帧】
|
||||
double sync_threshold; |
||||
|
||||
// 更新阈值
|
||||
sync_threshold = (pts_diff > AV_SYNC_THRESHOLD) |
||||
? pts_diff |
||||
: AV_SYNC_THRESHOLD; |
||||
|
||||
// 修正pts_diff
|
||||
if (abs(diff) < AV_NOSYNC_THRESHOLD) |
||||
{ |
||||
if (diff < -sync_threshold) // 如果diff小于0(表明视频帧落后),且超出了阈值,则立即播放
|
||||
{ |
||||
pts_diff = 0; |
||||
} |
||||
else if (diff > sync_threshold) // diff大于阈值,则表明当前视频帧没有放完,则延迟两个pts_diff
|
||||
{ |
||||
pts_diff = 2 * pts_diff; |
||||
} |
||||
} |
||||
m_theoretical_render_video_time += pts_diff; |
||||
|
||||
delay_until_next_wake = m_theoretical_render_video_time - (av_gettime() / 1000000.0L); |
||||
if (delay_until_next_wake < 0.010L) |
||||
{ |
||||
delay_until_next_wake = 0.010L; |
||||
} |
||||
|
||||
printf("render video delay:%lf\n", delay_until_next_wake); |
||||
auto wake_tp = std::chrono::high_resolution_clock::now() + std::chrono::duration<int, std::micro>((int)(delay_until_next_wake * 1000000 + 500)); |
||||
|
||||
std::this_thread::sleep_until(wake_tp); |
||||
|
||||
std::shared_ptr<MediaAVFrame> yuv_frame = std::make_shared<MediaAVFrame>(); |
||||
// 将yuv_frame->m_frame->data数组指向buffer,并按照codecContext和AVPixelFormat决定data指针数组各个成员的指向
|
||||
av_image_fill_arrays(yuv_frame->m_frame->data, // dst data[]
|
||||
yuv_frame->m_frame->linesize, // dst linesize[]
|
||||
buffer, // src buffer
|
||||
AV_PIX_FMT_YUV420P, // pixel format
|
||||
m_video_AVCodecContext->width, // width
|
||||
m_video_AVCodecContext->height, // height
|
||||
1 // align
|
||||
); |
||||
yuv_frame->m_frame->width = m_video_AVCodecContext->width; |
||||
yuv_frame->m_frame->height = m_video_AVCodecContext->height; |
||||
|
||||
// sws_scale将各种video_format转换为AV_PIX_FMT_YUV420P
|
||||
int sts = sws_scale(m_sws_ctx, // sws context
|
||||
video_frame->m_frame->data, // src slice
|
||||
video_frame->m_frame->linesize, // src stride
|
||||
0, // src slice y
|
||||
video_frame->m_frame->height, // src slice height
|
||||
yuv_frame->m_frame->data, // dst planes
|
||||
yuv_frame->m_frame->linesize); // dst strides
|
||||
|
||||
// use QApplication::instance() raplace m_renderRceiveObj because it may invoke by mutilthread. when it destruct will crash
|
||||
QMetaObject::invokeMethod( |
||||
QApplication::instance(), [=]() |
||||
{ m_render_receive_obj->updateImage(yuv_frame); }, |
||||
Qt::QueuedConnection); |
||||
} |
||||
av_free(buffer); |
||||
printf("[finished]:void MediaPlayer::render_video_thread()\n"); |
||||
} |
||||
void MediaPlayer::RenderAudioThread() |
||||
{ |
||||
while (!m_stop) |
||||
{ |
||||
PauseOrResume(); |
||||
bool late_first_frame = false; |
||||
std::unique_lock<std::mutex> locker(m_audio_frame_queue_mutex); |
||||
static double m_previous_pts_diff = 40e-3; |
||||
static bool m_first_frame = true; |
||||
if (!m_audio_frame_queue.empty()) |
||||
{ |
||||
std::shared_ptr<MediaAVFrame> audio_frame = m_audio_frame_queue.front(); |
||||
m_audio_frame_queue.pop_front(); |
||||
locker.unlock(); |
||||
|
||||
m_audio_current_pts = audio_frame->m_frame->best_effort_timestamp * av_q2d(m_audio_AVStream->time_base); |
||||
// the amount of time until we need to display this frame
|
||||
double diff = m_audio_current_pts - m_previous_audio_pts; |
||||
if (m_first_frame) |
||||
{ |
||||
late_first_frame = diff >= 1.0; |
||||
m_first_frame = false; |
||||
} |
||||
|
||||
if (diff <= 0 || late_first_frame) |
||||
{ // if diff is invalid, use previous
|
||||
diff = m_previous_pts_diff; |
||||
} |
||||
|
||||
// save for next time
|
||||
m_previous_audio_pts = m_audio_current_pts; |
||||
m_previous_pts_diff = diff; |
||||
|
||||
m_theoretical_render_audio_time += diff; // 理论上应该渲染的time_point
|
||||
double timeDiff = m_theoretical_render_audio_time - (av_gettime() / 1000000.0L); |
||||
|
||||
if (timeDiff < 0.010L) |
||||
{ |
||||
timeDiff = 0.01L; |
||||
} |
||||
|
||||
if (timeDiff > diff) |
||||
{ |
||||
timeDiff = diff; |
||||
} |
||||
|
||||
// printf("audio frame will delay:%lf \n", timeDiff);
|
||||
auto wake_tp = std::chrono::high_resolution_clock::now() + std::chrono::duration<int, std::micro>((int)(timeDiff * 1000000)); |
||||
|
||||
std::this_thread::sleep_until(wake_tp); |
||||
|
||||
int32_t bytes_per_sample = av_get_bytes_per_sample((enum AVSampleFormat)audio_frame->m_frame->format); |
||||
int32_t size = audio_frame->m_frame->nb_samples * bytes_per_sample * audio_frame->m_frame->channels; |
||||
char *data = av_pcm_clone(audio_frame->m_frame); |
||||
m_current_aduio_render_time = av_gettime(); |
||||
SDL_QueueAudio(m_current_audio_deviceId, data, size); |
||||
free((void *)data); |
||||
} |
||||
else |
||||
{ |
||||
locker.unlock(); |
||||
// printf("audio frame_queue is empty\n");
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(1)); |
||||
} |
||||
if (m_audio_decode_finish) |
||||
{ |
||||
break; |
||||
} |
||||
} |
||||
printf("[finished]:void MediaPlayer::render_audio_thread()\n"); |
||||
} |
||||
|
@ -1,28 +1,110 @@
|
||||
#ifndef PLAYTHREAD_H |
||||
#define PLAYTHREAD_H |
||||
#ifndef MEDIAPALYER_H |
||||
#define MEDIAPALYER_H |
||||
|
||||
#include <QThread> |
||||
#include "sdl2player.h" |
||||
#include "ffmpeg.h" |
||||
class MediaPlayer:public QThread |
||||
#include <stdio.h> |
||||
extern "C" |
||||
{ |
||||
#include <libavformat/avformat.h> |
||||
#include <libavcodec/avcodec.h> |
||||
#include <libavutil/avutil.h> |
||||
#include <libswscale/swscale.h> |
||||
#include <libavutil/time.h> |
||||
#include <libavutil/imgutils.h> |
||||
}; |
||||
#include "MediaAVDecoder.h" |
||||
#include "MediaAVStream.h" |
||||
#include "MediaAVPacket.h" |
||||
#include "SDL2/SDL.h" |
||||
#include "SDL2RenderWidget.h" |
||||
#include <thread> |
||||
#include <condition_variable> |
||||
#include <mutex> |
||||
#include <deque> |
||||
|
||||
#include <QApplication> |
||||
|
||||
#define PACKETSIZE 1024*5*10 |
||||
#define AV_SYNC_THRESHOLD 0.01 |
||||
#define AV_NOSYNC_THRESHOLD 10.0 |
||||
|
||||
// Player life-cycle states.
enum PlayStatus
{
    None,    // initial state right after the player is constructed
    Ready,   // a media file has been loaded
    Playing, // playback in progress
    Pause,   // playback paused
    Stop,    // playback stopped
};
||||
|
||||
class MediaPlayer |
||||
{ |
||||
Q_OBJECT |
||||
public: |
||||
MediaPlayer(void*); |
||||
MediaPlayer(); |
||||
~MediaPlayer(); |
||||
virtual void run(); |
||||
void started(); |
||||
void pause(); |
||||
void stop(); |
||||
void setPath(QString f_Path); |
||||
int isStop = 0; |
||||
bool StartPlay(const char *file_name); |
||||
bool StopPlay(); |
||||
void PauseResumePlay(); |
||||
void GetVideoSize(int &width, int &height); |
||||
bool OpenDecoder(AVCodecContext** codecContext, enum AVMediaType type); |
||||
public: |
||||
void registerRenderWindowsCallback(SDL2RenderWidget *receiver); |
||||
private: |
||||
bool state=false; |
||||
bool ffmpegstate=false; |
||||
QString f_Path; |
||||
void* m_winID; |
||||
SDL2Player* sdl2_player; |
||||
FFmpeg* m_ffmpeg; |
||||
}; |
||||
bool OpenFile(const char *filename); |
||||
void CloseFile(); |
||||
void PauseOrResume(); |
||||
void FreeAVFormatCtx(); |
||||
void DemuxThread(); |
||||
void PushPkt2Queue(std::shared_ptr<MediaAVPacket> pkt); |
||||
void VideoDecode(std::shared_ptr<MediaAVPacket> video_pkt); |
||||
void AudioDecode(std::shared_ptr<MediaAVPacket> audio_pkt); |
||||
void AudioDecodeThread(); |
||||
void VideoDecodeThread(); |
||||
void RenderAudioThread(); |
||||
void RenderVideoThread(); |
||||
private: |
||||
AVFormatContext* m_AVFormatCtx; |
||||
AVCodecContext* m_video_AVCodecContext; |
||||
AVStream* m_video_AVStream; |
||||
AVCodecContext* m_audio_AVCodecContext; |
||||
AVStream* m_audio_AVStream; |
||||
SwsContext* m_sws_ctx; |
||||
|
||||
SDL2RenderWidget* m_render_receive_obj = nullptr; |
||||
SDL_AudioDeviceID m_current_audio_deviceId; |
||||
|
||||
std::thread m_demux_thread; |
||||
std::thread m_video_decode_thread; |
||||
std::thread m_audio_decode_thread; |
||||
std::thread m_video_render_thread; |
||||
std::thread m_audio_render_thread; |
||||
|
||||
std::mutex m_pause_mutex; |
||||
std::mutex m_pkt_audio_queue_mutex; |
||||
std::mutex m_pkt_video_queue_mutex; |
||||
std::mutex m_audio_frame_queue_mutex; |
||||
std::mutex m_video_frame_queue_mutex; |
||||
|
||||
std::condition_variable m_pause_condition_variable; |
||||
std::condition_variable m_demuex_condition_variable; |
||||
std::condition_variable m_pkt_audio_condition_variable; |
||||
std::condition_variable m_pkt_vidoe_condition_variable; |
||||
std::condition_variable m_frame_video_condition_varible; |
||||
|
||||
#endif // PLAYTHREAD_H
|
||||
std::deque<std::shared_ptr<MediaAVPacket>> m_video_packet_queue; |
||||
std::deque<std::shared_ptr<MediaAVPacket>> m_audio_packet_queue; |
||||
std::deque<std::shared_ptr<MediaAVFrame>> m_audio_frame_queue; |
||||
std::deque<std::shared_ptr<MediaAVFrame>> m_video_frame_queue; |
||||
|
||||
bool m_demux_finish = false; |
||||
bool m_audio_decode_finish = false; |
||||
bool m_video_decode_finish = false; |
||||
|
||||
bool m_stop = false; |
||||
PlayStatus m_playStatus = PlayStatus::None; |
||||
double m_theoretical_render_audio_time; |
||||
double m_theoretical_render_video_time; |
||||
double m_current_aduio_render_time; |
||||
double m_previous_audio_pts = 0; |
||||
double m_audio_current_pts = 0; |
||||
}; |
||||
#endif |
@ -1,75 +0,0 @@
|
||||
#include "stb_ffmpeg.h" |
||||
|
||||
// Trivial special members: the font-cache member manages its own state.
STB_FFmpeg::STB_FFmpeg() = default;
STB_FFmpeg::~STB_FFmpeg() = default;
||||
SDL_Surface* STB_FFmpeg::default_logo_surface(std::string image){
    // Decode `image` with stb_image (forced to 4-channel RGBA) and wrap the
    // pixels in an SDL surface.  NOTE: the returned surface does NOT own the
    // pixel buffer — the stb allocation must stay alive as long as the
    // surface does (it is intentionally not freed here on success).
    int req_format = STBI_rgb_alpha;
    int width, height, orig_format;
    unsigned char* data = stbi_load(image.c_str(), &width, &height, &orig_format, req_format);
    if(data==NULL){
        return nullptr; // decode failed
    }
    int depth, pitch;
    Uint32 pixel_format;
    if (req_format == STBI_rgb) { // kept for symmetry; req_format is RGBA above
        depth = 24;
        pitch = 3*width;
        pixel_format = SDL_PIXELFORMAT_RGB24;
    } else {
        depth = 32;
        pitch = 4*width;
        pixel_format = SDL_PIXELFORMAT_RGBA32;
    }
    SDL_Surface* surface = SDL_CreateRGBSurfaceWithFormatFrom((void*)data, width, height, depth, pitch, pixel_format);
    if (surface == nullptr) {
        stbi_image_free(data); // BUG FIX: pixel data leaked when surface creation failed
    }
    return surface;
}
||||
void STB_FFmpeg::readFileRaw (const std::string & fullPath, std::string & output) {
    // Slurp an entire file into `output` in binary mode; `output` is left
    // untouched when the file cannot be opened.
    std::ifstream stream(fullPath.c_str(), std::ios::in | std::ios::binary);
    if (!stream.is_open()) {
        std::cout << "readFileRaw: " << fullPath << " -- " << "WARNING: Could not open file." << std::endl;
        return;
    }
    std::cout << "Opened! " << fullPath << std::endl;

    // Determine the file size, then read the whole content in one call.
    stream.seekg (0, std::ios::end);
    const size_t LEN = stream.tellg();
    stream.seekg (0, std::ios::beg);

    output.resize(LEN);
    stream.read(&output[0], LEN);
    stream.close();
}
||||
|
||||
void STB_FFmpeg::readFileRaw_toMemory (const std::string & fullPath, sttfont_memory & mem) {
    // Slurp an entire file into the sttfont-owned buffer `mem`; `mem` is left
    // untouched when the file cannot be opened.
    std::ifstream stream(fullPath.c_str(), std::ios::in | std::ios::binary);
    if (!stream.is_open()) {
        std::cout << "readFileRaw: " << fullPath << " -- " << "WARNING: Could not open file." << std::endl;
        return;
    }
    std::cout << "Opened! " << fullPath << std::endl;

    // Determine the file size, allocate, then read everything in one call.
    stream.seekg (0, std::ios::end);
    const size_t LEN = stream.tellg();
    stream.seekg (0, std::ios::beg);

    mem.alloc(LEN);
    stream.read(mem.data, LEN);
    stream.close();
}
||||
void STB_FFmpeg::setFontArgs(SDL_Renderer* sdl_rander){
    // Configure the font cache, load the bundled PingFang TTF from the
    // application's working directory, and bind the cache to the renderer.
    fc.faceSize = 48;
    fc.tabWidthInSpaces = 12;
    sttfont_memory fontData;
    readFileRaw_toMemory((QDir::currentPath()+"/font/PingFang.ttf").toStdString(), fontData);
    fc.loadFontManaged(fontData); // cache takes ownership of fontData
    fc.bindRenderer(sdl_rander);
}
@ -1,23 +0,0 @@
|
||||
#ifndef STBFFMPEG_H |
||||
#define STBFFMPEG_H |
||||
|
||||
#include "SDL2/SDL.h" |
||||
extern "C"{ |
||||
#include "stb_image.h" |
||||
} |
||||
|
||||
#include "stb_font.h" |
||||
#include <iostream> |
||||
#include <fstream> |
||||
#include <QDir> |
||||
class STB_FFmpeg{ |
||||
public: |
||||
STB_FFmpeg(); |
||||
~STB_FFmpeg(); |
||||
SDL_Surface* default_logo_surface(std::string image); |
||||
void readFileRaw (const std::string & fullPath, std::string & output); |
||||
void readFileRaw_toMemory (const std::string & fullPath, sttfont_memory & mem); |
||||
void setFontArgs(SDL_Renderer* sdl_rander); |
||||
sdl_stb_font_cache fc; |
||||
}; |
||||
#endif |
@ -1,13 +0,0 @@
|
||||
#ifdef STB_IMAGE_IMPLEMENTATION |
||||
#undef STB_IMAGE_IMPLEMENTATION |
||||
#endif |
||||
#define STB_IMAGE_IMPLEMENTATION |
||||
extern "C"{ |
||||
#include "stb_image.h" |
||||
} |
||||
|
||||
#ifdef SDL_STB_FONT_IMPL |
||||
#undef SDL_STB_FONT_IMPL |
||||
#endif |
||||
#define SDL_STB_FONT_IMPL |
||||
#include "stb_font.h" |
Loading…
Reference in new issue