#ifdef __cplusplus
extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/imgutils.h"
#include "libswresample/swresample.h"
#include "libavutil/avassert.h"
#ifdef __cplusplus
}
#endif
#include "SoundRecordImpl.h"
#include <QDebug>
#include <QAudioDeviceInfo>
#include <thread>
#include <fstream>
#include <dshow.h>
using namespace std;
// When g_collectFrameCnt equals g_encodeFrameCnt, every captured frame was encoded
int g_collectFrameCnt = 0; // number of captured audio frames
int g_encodeFrameCnt = 0;  // number of encoded audio frames
ScreenRecordImpl::ScreenRecordImpl(QObject * parent) :
QObject(parent)
, m_aIndex(-1)
, m_aFmtCtx(nullptr), m_oFmtCtx(nullptr)
, m_aDecodeCtx(nullptr)
, m_aEncodeCtx(nullptr)
, m_aFifoBuf(nullptr)
, m_stop(false)
, m_state(RecordState::NotStarted)
{
}
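// Init: read the recording parameters from the QVariantMap supplied by the caller.
// Expected keys: "filePath" (output file path) and "bit_rate" (audio bitrate in bits per second).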
void ScreenRecordImpl::Init(const QVariantMap& map)
{
m_filePath = map["filePath"].toString();
m_bitrate = map["bit_rate"].toInt();
}
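// Start: on the first call, launch a detached thread running RecordAudioThreadProc;
// when the recorder is paused, wake the recording thread through m_cvNotPause instead.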
void ScreenRecordImpl::Start()
{
if (m_state == RecordState::NotStarted)
{
qDebug() << "start record";
m_state = RecordState::Started;
std::thread recordThread(&ScreenRecordImpl::RecordAudioThreadProc, this);
recordThread.detach();
}
else if (m_state == RecordState::Paused)
{
qDebug() << "continue record";
m_state = RecordState::Started;
m_cvNotPause.notify_one();
}
}
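// Pause/Stop: the recording thread is expected to poll m_state and to wait on
// m_cvNotPause while paused (the thread procedure itself is outside this section).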
void ScreenRecordImpl::Pause()
{
qDebug() << "pause record";
m_state = RecordState::Paused;
}
void ScreenRecordImpl::Stop()
{
qDebug() << "stop record";
RecordState state = m_state;
m_state = RecordState::Stopped;
if (state == RecordState::Paused)
m_cvNotPause.notify_one();
}
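// dup_wchar_to_utf8: convert a wide (UTF-16) string to UTF-8 using WideCharToMultiByte.
// The buffer is allocated with av_malloc, so the caller must release it with av_free.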
static char *dup_wchar_to_utf8(wchar_t *w)
{
char *s = NULL;
int l = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
s = (char *)av_malloc(l);
if (s)
WideCharToMultiByte(CP_UTF8, 0, w, -1, s, l, 0, 0);
return s;
}
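// Hypothetical usage sketch (the device name below is only an example):
//   char *name = dup_wchar_to_utf8(L"audio=Microphone (Example Device)");
//   // ... pass name to avformat_open_input ...
//   av_free(name);

// check_sample_fmt: return 1 if the encoder supports sample_fmt, otherwise 0.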
static int check_sample_fmt(const AVCodec *codec, enum AVSampleFormat sample_fmt)
{
const enum AVSampleFormat *p = codec->sample_fmts;
while (*p != AV_SAMPLE_FMT_NONE) {
if (*p == sample_fmt)
return 1;
p++;
}
return 0;
}
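// OpenAudio: open the microphone through the dshow input device, locate the audio
// stream, and create/open the decoder context m_aDecodeCtx. Returns 0 on success, -1 on failure.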
int ScreenRecordImpl::OpenAudio()
{
int ret = -1;
AVCodec *decoder = nullptr;
qDebug() << GetSpeakerDeviceName();
qDebug() << GetMicrophoneDeviceName();
QString audioDeviceName = "audio=" + GetMicrophoneDeviceName();
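// The commented-out block below dumps the available dshow devices into the FFmpeg log
// (option "list_devices"="true"); it is kept only for debugging.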
//AVDictionary* options = nullptr;
//av_dict_set(&options, "list_devices", "true", 0);
//AVInputFormat *iformat = av_find_input_format("dshow");
//qDebug() << "Device Info=============";
//avformat_open_input(&m_aFmtCtx, "audio=dummy", iformat, &options);
//qDebug() << "========================";
// Look up the dshow (DirectShow) input format used for Windows device capture
AVInputFormat *ifmt = av_find_input_format("dshow");
// Hard-coded device-name examples from a Chinese-locale system ("麦克风" = "Microphone"), kept for reference:
//char * deviceName = dup_wchar_to_utf8(L"audio=麦克风 (Conexant SmartAudio HD)");
//char * deviceName = dup_wchar_to_utf8(L"audio=麦克风 (High Definition Audio 设备)");
if (avformat_open_input(&m_aFmtCtx, audioDeviceName.toStdString().c_str(), ifmt, nullptr) < 0)
{
qDebug() << "Can not open audio input stream";
return -1;
}
if (avformat_find_stream_info(m_aFmtCtx, nullptr) < 0)
return -1;
for (int i = 0; i < m_aFmtCtx->nb_streams; ++i)
{
AVStream * stream = m_aFmtCtx->streams[i];
if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
{
decoder = avcodec_find_decoder(stream->codecpar->codec_id);
if (decoder == nullptr)
{
printf("Codec not found.(没有找到解码器)\n");
return -1;
}
// Copy the audio stream parameters into the decoder context
m_aDecodeCtx = avcodec_alloc_context3(decoder);
if ((ret = avcodec_parameters_to_context(m_aDecodeCtx, stream->codecpar)) < 0)
{
qDebug() << "Audio avcodec_parameters_to_context failed,error code: " << ret;
return -1;
}
m_aIndex = i;
break;
}
}
if (m_aIndex == -1 || !m_aDecodeCtx)
{
qDebug() << "No audio stream found in input";
return -1;
}
if (avcodec_open2(m_aDecodeCtx, decoder, nullptr) < 0)
{
qDebug() << "Can not open audio decoder";
return -1;
}
return 0;
}
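// OpenOutput: allocate the output format context for m_filePath, create the output
// audio stream, configure and open the encoder m_aEncodeCtx, and initialize the
// resampler m_swrCtx. Returns -1 on any failure.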
int ScreenRecordImpl::OpenOutput()
{
int ret = -1;
AVStream *vStream = nullptr, *aStream = nullptr;
string filePath = m_filePath.toStdString();
ret = avformat_alloc_output_context2(&m_oFmtCtx, nullptr, nullptr, filePath.c_str());
if (ret < 0)
{
qDebug() << "avformat_alloc_output_context2 failed";
return -1;
}
if (m_aFmtCtx->streams[m_aIndex]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
{
aStream = avformat_new_stream(m_oFmtCtx, NULL);
if (!aStream)
{
printf("can not new audio stream for output!\n");
return -1;
}
m_aOutIndex = aStream->index;
AVCodec *encoder = avcodec_find_encoder(m_oFmtCtx->oformat->audio_codec);
if (!encoder)
{
qDebug() << "Can not find audio encoder, id: " << m_oFmtCtx->oformat->audio_codec;
return -1;
}
m_aEncodeCtx = avcodec_alloc_context3(encoder);
if (nullptr == m_aEncodeCtx)
{
qDebug() << "audio avcodec_alloc_context3 failed";
return -1;
}
//ret = avcodec_parameters_to_context(m_aEncodeCtx, m_aFmtCtx->streams[m_aIndex]->codecpar);
//if (ret < 0)
//{
// qDebug() << "Output audio avcodec_parameters_to_context,error code:" << ret;
// return -1;
//}
m_aEncodeCtx->sample_fmt = encoder->sample_fmts ? encoder->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
m_aEncodeCtx->bit_rate = m_bitrate;
m_aEncodeCtx->sample_rate = 44100;
if (encoder->supported_samplerates)
{
m_aEncodeCtx->sample_rate = encoder->supported_samplerates[0];
for (int i = 0; encoder->supported_samplerates[i]; ++i)
{
if (encoder->supported_samplerates[i] == 44100)
m_aEncodeCtx->sample_rate = 44100;
}
}
m_aEncodeCtx->channel_layout = AV_CH_LAYOUT_STEREO;
if (encoder->channel_layouts)
{
m_aEncodeCtx->channel_layout = encoder->channel_layouts[0];
for (int i = 0; encoder->channel_layouts[i]; ++i)
{
if (encoder->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
m_aEncodeCtx->channel_layout = AV_CH_LAYOUT_STEREO;
}
}
m_aEncodeCtx->channels = av_get_channel_layout_nb_channels(m_aEncodeCtx->channel_layout);
m_aEncodeCtx->time_base = AVRational{ 1, m_aEncodeCtx->sample_rate };
aStream->time_base = AVRational{ 1, m_aEncodeCtx->sample_rate };
m_aEncodeCtx->codec_tag = 0;
m_aEncodeCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
if (!check_sample_fmt(encoder, m_aEncodeCtx->sample_fmt))
{
qDebug() << "Encoder does not support sample format " << av_get_sample_fmt_name(m_aEncodeCtx->sample_fmt);
return -1;
}
// Open the audio encoder; frame_size is only known after avcodec_open2() succeeds
ret = avcodec_open2(m_aEncodeCtx, encoder, nullptr);
if (ret < 0)
{
qDebug() << "Can not open the audio encoder, id: " << encoder->id << "error code: " << ret;
return -1;
}
// Copy the encoder context parameters back into the output audio stream
ret = avcodec_parameters_from_context(aStream->codecpar, m_aEncodeCtx);
if (ret < 0)
{
qDebug() << "Output audio avcodec_parameters_from_context,error code:" << ret;
return -1;
}
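// Configure the resampler to convert captured samples from the decoder's layout and
// format (typically interleaved S16 from dshow) into the encoder's format (e.g. FLTP for AAC).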
m_swrCtx = swr_alloc();
if (!m_swrCtx)
{
qDebug() << "swr_alloc failed";
return -1;
}
av_opt_set_int(m_swrCtx, "in_channel_count", m_aDecodeCtx->channels, 0); //2
av_opt_set_int(m_swrCtx, "in_sample_rate", m_aDecodeCtx->sample_rate, 0); //44100
av_opt_set_sample_fmt(m_swrCtx, "in_sample_fmt", m_aDecodeCtx->sample_fmt, 0); //AV_SAMPLE_FMT_S16
av_opt_set_int(m_swrCtx, "out_channel_count", m_aEncodeCtx->channels, 0); //2
av_opt_set_int(m_swrCtx, "out_sample_rate", m_aEncodeCtx->sample_rate, 0); //44100
av_opt_set_sample_fmt(m_swrCtx, "out_sample_fmt", m_aEncodeCtx->sample_fmt, 0); //AV_SAMPLE_FMT_FLTP
if ((ret = swr_init(m_swrCtx)) < 0)
{
qDebug() << "swr_init failed";
return -1;
}
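// Note: the audio FIFO m_aFifoBuf (libavutil/audio_fifo.h) is presumably allocated
// elsewhere, e.g. with av_audio_fifo_alloc(), once m_aEncodeCtx->frame_size is known;
// that code is not part of this section.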
}
// Open the output file, unless the container format does not need one (AVFMT_NOFILE)
if (!(m_oFmtCtx->oformat->flags & AVFMT_NOFILE))