#include "Thread_FFMPEG_LaLiu.h"
// Upper bound (bytes) for one decoded audio frame; sized generously
// (192000 = 1 second of 48 kHz, 32-bit stereo) for the PCM output buffer.
#define MAX_AUDIO_FRAME_SIZE 192000
// Global instance of the stream-pulling (RTMP client) thread.
// ("class" here is an elaborated-type-specifier; this defines the object.)
class Thread_FFMPEG_LaLiu thread_laliu;
// Thread entry point: releases frame buffers left over from a previous run,
// then starts pulling and decoding the RTMP stream (blocks until the stream
// loop in ffmpeg_rtmp_client() exits).
void Thread_FFMPEG_LaLiu::run()
{
    // Free any buffers allocated by a previous invocation. They are created
    // with new unsigned char[...] in ffmpeg_rtmp_client(), so array delete[]
    // is required here (scalar delete on a new[] pointer is undefined behavior).
    if(rgb24_data)
    {
        delete [] rgb24_data;
        rgb24_data=nullptr;
    }
    if(yuv420p_data)
    {
        delete [] yuv420p_data;
        yuv420p_data=nullptr;
    }
    LogSend("开始拉流.\n");
    ffmpeg_rtmp_client();
}
//拉流
int Thread_FFMPEG_LaLiu::ffmpeg_rtmp_client()
{
int video_width=0;
int video_height=0;
// Allocate an AVFormatContext
AVFormatContext* format_ctx = avformat_alloc_context();
// 打开rtsp:打开输入流并读取标题。 编解码器未打开
const char* url =m_rtmp_addr.toUtf8().data();// "rtmp://193.112.142.152:8888/live/abcd";
LogSend(tr("拉流地址: %1\n").arg(url));
int ret = -1;
ret = avformat_open_input(&format_ctx, url, nullptr, nullptr);
if(ret != 0)
{
LogSend(tr("无法打开网址: %1, return value: %2 \n").arg(url).arg(ret));
return -1;
}
// 读取媒体文件的数据包以获取流信息
ret = avformat_find_stream_info(format_ctx, nullptr);
if(ret < 0)
{
LogSend(tr("无法获取流信息: %1\n").arg(ret));
return -1;
}
AVCodec *video_pCodec;
AVCodec *audio_pCodec;
// audio/video stream index
int video_stream_index = -1;
int audio_stream_index = -1;
LogSend(tr("视频中流的数量: %1\n").arg(format_ctx->nb_streams));
for(int i = 0; i < format_ctx->nb_streams; ++i)
{
const AVStream* stream = format_ctx->streams[i];
LogSend(tr("编码数据的类型: %1\n").arg(stream->codecpar->codec_id));
if(stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
{
// //判断视频流是否是H264格式
// if(stream->codecpar->codec_id!=AV_CODEC_ID_H264)
// {
// LogSend("当前视频编码格式暂时不支持. 目前只支持:H264\n");
// return 0;
// }
//查找解码器
video_pCodec=avcodec_find_decoder(AV_CODEC_ID_H264);
//打开解码器
int err = avcodec_open2(stream->codec,video_pCodec, NULL);
if(err!=0)
{
LogSend(tr("H264解码器打开失败.\n"));
return 0;
}
video_stream_index = i;
//得到视频帧的宽高
video_width=stream->codecpar->width;
video_height=stream->codecpar->height;
LogSend(tr("视频帧的尺寸(以像素为单位): (宽X高)%1x%2 像素格式: %3\n").arg(
stream->codecpar->width).arg(stream->codecpar->height).arg(stream->codecpar->format));
}
else if(stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
{
audio_stream_index = i;
qDebug()<<tr("音频样本格式: %1").arg(stream->codecpar->format);
// //判断音频流是否是AAC格式
// if(stream->codecpar->codec_id!=AV_CODEC_ID_AAC)
// {
// LogSend("当前音频编码格式暂时不支持. 目前只支持:AAC\n");
// return 0;
// }
//查找解码器
audio_pCodec=avcodec_find_decoder(AV_CODEC_ID_AAC);
//打开解码器
int err = avcodec_open2(stream->codec,audio_pCodec, nullptr);
if(err!=0)
{
LogSend(tr("AAC解码器打开失败.\n"));
return 0;
}
}
}
//初始化解码相关的参数
AVFrame *yuv420p_pFrame = nullptr;
AVFrame *PCM_pFrame = nullptr;
AVPacket *packet;
uint8_t *buffer;
struct SwrContext *convert_ctx;
int buffer_size;
if (video_stream_index == -1)
{
LogSend("没有检测到视频流.\n");
return -1;
}
else
{
yuv420p_pFrame = av_frame_alloc();// 存放解码后YUV数据的缓冲区
}
if (audio_stream_index == -1)
{
LogSend("没有检测到音频流.\n");
}
else
{
LogSend("检测到音频流.\n");
Audio_Out_Init();
PCM_pFrame = av_frame_alloc();// 存放解码后PCM数据的缓冲区
//创建packet,用于存储解码前音频的数据
packet = (AVPacket *)malloc(sizeof(AVPacket));
av_init_packet(packet);
//设置音频转码后输出相关参数
//采样的布局方式
uint64_t out_channel_layout = AV_CH_LAYOUT_MONO;
//采样个数
int out_nb_samples = 1024;
//采样格式
enum AVSampleFormat sample_fmt = AV_SAMPLE_FMT_S16;
//采样率
int out_sample_rate = 44100;
//通道数
int out_channels = av_get_channel_layout_nb_channels(out_channel_layout);
printf("%d\n",out_channels);
//创建buffer
buffer_size = av_samples_get_buffer_size(nullptr, out_channels, out_nb_samples, sample_fmt, 1);
//注意要用av_malloc
buffer = (uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE * 2);
int64_t in_channel_layout = av_get_default_channel_layout(format_ctx->streams[audio_stream_index]->codec->channels);
//打开转码器
convert_ctx = swr_alloc();
//设置转码参数
convert_ctx = swr_alloc_set_opts(convert_ctx, out_channel_layout, sample_fmt, out_sample_rate, \
in_channel_layout, format_ctx->streams[audio_stream_index]->codec->sample_fmt, format_ctx->streams[audio_stream_index]->codec->sample_rate, 0, nullptr);
//初始化转码器
swr_init(convert_ctx);
}
//申请存放yuv420p数据的空间
yuv420p_data=new unsigned char[video_width*video_height*3/2];
//申请存放rgb24数据的空间
rgb24_data=new unsigned char[video_width*video_height*3];
int y_size=video_width*video_height;
AVPacket pkt;
int re;
bool send_flag=1;
run_flag=true;
while(run_flag)
{
//读取一帧数据
ret=av_read_frame(format_ctx, &pkt);
if(ret < 0)
{
continue;
}
//得到视频包
if(pkt.stream_index == video_stream_index)
{
//解码视频 frame
re = avcodec_send_packet(format_ctx->streams[video_stream_index]->codec,&pkt);//发送视频帧
if (re != 0)
{
av_packet_unref(&pkt);//不成功就释放这个pkt
continue;
}
re = avcodec_receive_frame(format_ctx->streams[video_stream_index]->codec, yuv420p_pFrame);//接受后对视频帧进行解码
if (re != 0)
{
av_packet_unref(&pkt);//不成功就释放这个pkt
continue;
}
//将YUV数据拷贝到缓冲区
memcpy(yuv420p_data,(const void *)yuv420p_pFrame->data[0],y_size);
memcpy(yuv420p_data+y_size,(const void *)yuv420p_pFrame->data[1],y_size/4);
memcpy(yuv420p_data+y_size+y_size/4,(const void *)yuv420p_pFrame->data[2],y_size/4);
//将yuv420p转为RGB24格式
YUV420P_to_RGB24(yuv420p_data,rgb24_data,video_width,video_height);
//加载图片数据
QImage image(rgb24_data,video_width,video_height,QImage::Format_RGB888);
VideoDataOutput(image); //发送信号
}
//得到音频包
if(pkt.stream_index == audio_stream_index)
{
if (audio_stream_index != -1)
{
//解码声音
re = avcodec_send_packet(format_ctx->streams[audio_st
// NOTE(review): the remainder of ffmpeg_rtmp_client() is missing — the listing
// was truncated mid-statement above; pagination artifacts from the original
// web page ("- 1 - 2 - 3 - 4 前往页") were removed here.