You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
k9copy/k9author/k9avidecode.cpp

405 lines
13 KiB

//
// C++ Implementation: k9avidecode
//
// Description:
//
//
// Author: Jean-Michel PETIT <k9copy@free.fr>, (C) 2007
//
// Copyright: See COPYING file that comes with this distribution
//
//
#include "config.h"
#include "k9avidecode.h"
#ifdef OLD_FFMPEG
#include <ffmpeg/avcodec.h>
#endif
#ifdef NEW_FFMPEG
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#endif
// This is probably the incorrect revision for when CODEC_TYPE_VIDEO was removed
// Please update the comparison below if you know the exact revision that CODEC_TYPE_VIDEO was removed in!
#if LIBAVCODEC_VERSION_INT < (AV_VERSION_INT(52,72,2))
#define AVMEDIA_TYPE_VIDEO CODEC_TYPE_VIDEO
#endif
#include <tqimage.h>
#include <dlfcn.h>
#include <tdelocale.h>
#include <cstdlib>
#include "ac.h"
// Handles returned by dlopen() for the dynamically loaded ffmpeg
// libraries. They are shared by every k9AviDecode instance and opened
// lazily by the first constructed instance.
void *CodecHandle=0;
void *FormatHandle=0;
void *UtilHandle=0;
void *SwscaleHandle=0;
// Number of live k9AviDecode instances; the libraries are dlclose()d
// when this drops back to zero in the destructor.
int glibref=0;
#ifdef NEW_FFMPEG
// Fallback replacement for av_free_packet() used when the symbol is not
// exported by the loaded libavcodec: run the packet's own destruct
// callback (when present) and reset its payload pointer and size.
void av_free_packet_internal(AVPacket *pkt)
{
    if (!pkt)
        return;
    if (pkt->destruct)
        pkt->destruct(pkt);
    pkt->data = NULL;
    pkt->size = 0;
}
#endif
#ifdef HAVE_SWSCALE
#include "libswscale/swscale.h"
// Scaling/conversion algorithm passed to sws_getContext() when
// converting decoded frames to RGB in readFrame().
static int sws_flags = SWS_BICUBIC;
#endif
// Constructor: dlopen()s the ffmpeg libraries (once, shared across
// instances) and resolves every entry point this class uses via dlsym,
// picking the symbol that matches the compiled-against ffmpeg version.
// On failure m_error holds a translated message and the object stays
// in a safe, closed state.
k9AviDecode::k9AviDecode(TQObject *parent, const char *name)
        : TQObject(parent, name) {
    // Initialize every member up front: several error paths below
    // return early, and the destructor reads m_opened, so nothing may
    // be left uninitialized (previously these were only set at the end,
    // making early returns leave m_opened indeterminate — UB in ~k9AviDecode).
    m_opened=false;
    m_FormatCtx = NULL;
    m_CodecCtx = NULL;
    m_Codec = NULL;
    m_Frame = NULL;
    m_FrameRGB = NULL;
    m_buffer = NULL;
    m_error="";
    // Load the shared libraries only for the first instance.
    if (glibref==0) {
        CodecHandle=dlopen("libavcodec.so",RTLD_LAZY | RTLD_GLOBAL);
        FormatHandle=dlopen("libavformat.so",RTLD_LAZY | RTLD_GLOBAL);
        UtilHandle=dlopen("libavutil.so",RTLD_LAZY | RTLD_GLOBAL);
#ifdef HAVE_SWSCALE
        SwscaleHandle=dlopen("libswscale.so",RTLD_LAZY);
        if (SwscaleHandle==0)
            SwscaleHandle=dlopen("libswscale.so.2",RTLD_LAZY);
#endif
    }
    if (!CodecHandle) {
        m_error =i18n("Cannot open the library %1").arg("libavcodec");
        return;
    }
    if (!FormatHandle) {
        m_error =i18n("Cannot open the library %1").arg("libavformat");
        return;
    }
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 33, 0)
    // libavutil is only required directly for newer versions (av_gettime).
    if (!UtilHandle) {
        m_error =i18n("Cannot open the library %1").arg("libavutil");
        return;
    }
#endif
#ifdef HAVE_SWSCALE
    if (!SwscaleHandle) {
        m_error =i18n("Cannot open the library %1").arg("libswscale");
    }
#endif
    // Resolve the entry points; names/signatures changed across ffmpeg
    // major versions, hence the version guards.
    av_register_all = (av_register_all_t)dlsym(FormatHandle,"av_register_all");
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(53, 2, 0)
    avformat_open_input = (avformat_open_input_t)dlsym(FormatHandle,"avformat_open_input");
#else
    av_open_input_file = (av_open_input_file_t)dlsym(FormatHandle,"av_open_input_file");
#endif
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(53, 6, 0)
    avformat_find_stream_info = (avformat_find_stream_info_t)dlsym(FormatHandle,"avformat_find_stream_info");
#else
    av_find_stream_info = (av_find_stream_info_t)dlsym(FormatHandle,"av_find_stream_info");
#endif
    avcodec_find_decoder =(avcodec_find_decoder_t) dlsym(CodecHandle,"avcodec_find_decoder");
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(53, 8, 0)
    avcodec_open2 = (avcodec_open2_t)dlsym(CodecHandle,"avcodec_open2");
#else
    avcodec_open = (avcodec_open_t)dlsym(CodecHandle,"avcodec_open");
#endif
    avcodec_alloc_frame = (avcodec_alloc_frame_t)dlsym(CodecHandle,"avcodec_alloc_frame");
    avpicture_get_size = (avpicture_get_size_t)dlsym(CodecHandle,"avpicture_get_size");
    av_malloc = (av_malloc_t)dlsym(CodecHandle,"av_malloc");
    avpicture_fill = (avpicture_fill_t)dlsym(CodecHandle,"avpicture_fill");
    av_read_frame = (av_read_frame_t)dlsym(FormatHandle,"av_read_frame");
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(52, 23, 0)
    avcodec_decode_video2 = (avcodec_decode_video2_t)dlsym(CodecHandle,"avcodec_decode_video2");
#else
    avcodec_decode_video = (avcodec_decode_video_t)dlsym(CodecHandle,"avcodec_decode_video");
#endif
#ifndef HAVE_SWSCALE
    img_convert = (img_convert_t)dlsym(CodecHandle,"img_convert");
    // if img_convert is null (deprecated in ffmpeg), we need libswscale
    if (!img_convert) {
        m_error = i18n("Cannot open the library %1").arg("libswscale");
        return;
    }
#endif
    av_free = (av_free_t)dlsym(CodecHandle,"av_free");
    av_free_packet = (av_free_packet_t)dlsym(CodecHandle,"av_free_packet");
#ifdef NEW_FFMPEG
    // av_free_packet_internal only exists for NEW_FFMPEG builds, so the
    // fallback assignment must be guarded the same way.
    if (av_free_packet==0)
        av_free_packet=av_free_packet_internal;
#endif
    // avcodec_close lives in libavcodec; look it up there (it was
    // previously resolved through FormatHandle and only worked via
    // libavformat's dependency on libavcodec).
    avcodec_close = (avcodec_close_t)dlsym(CodecHandle,"avcodec_close");
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(53, 17, 0)
    avformat_close_input = (avformat_close_input_t)dlsym(FormatHandle,"avformat_close_input");
#else
    av_close_input_file = (av_close_input_file_t)dlsym(FormatHandle,"av_close_input_file");
#endif
    av_seek_frame=(av_seek_frame_t)dlsym(FormatHandle,"av_seek_frame");
    av_rescale_q=(av_rescale_q_t)dlsym(FormatHandle,"av_rescale_q");
    avcodec_flush_buffers=(avcodec_flush_buffers_t)dlsym(CodecHandle,"avcodec_flush_buffers");
#ifdef HAVE_SWSCALE
    sws_freeContext= (sws_freeContext_t)dlsym(SwscaleHandle,"sws_freeContext");
    sws_getContext=(sws_getContext_t)dlsym(SwscaleHandle,"sws_getContext");
    sws_scale= (sws_scale_t)dlsym(SwscaleHandle,"sws_scale");
#endif
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 33, 0)
    av_gettime=(av_gettime_t)dlsym(UtilHandle,"av_gettime");
#else
    av_gettime=(av_gettime_t)dlsym(FormatHandle,"av_gettime");
#endif
    av_register_all();
    glibref++;
}
// Destructor: close any open file, then dlclose the shared libraries
// when the last instance goes away.
k9AviDecode::~k9AviDecode() {
    if (m_opened)
        close();
    glibref--;
    if (glibref==0) {
        dlclose(FormatHandle);
        dlclose(CodecHandle);
        if(UtilHandle) {
            dlclose(UtilHandle);
        }
#ifdef HAVE_SWSCALE
        if (SwscaleHandle) {
            // BUG FIX: this previously called dlclose(CodecHandle),
            // double-closing libavcodec and leaking libswscale.
            dlclose(SwscaleHandle);
        }
#endif
    }
}
#include "k9avidecode.moc"
bool k9AviDecode::open(const TQString & _fileName) {
m_error="";
if (m_opened)
close();
// Open video file
if (
# if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(53, 2, 0)
avformat_open_input(&m_FormatCtx, _fileName.utf8(), NULL, NULL)!=0
# else
av_open_input_file(&m_FormatCtx, _fileName.utf8(), NULL, 0, NULL)!=0
# endif
) {
m_error=i18n("Couldn't open the file %1").arg(_fileName);
return false; // Couldn't open file}
}
// Retrieve stream information
if (
# if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(53, 6, 0)
avformat_find_stream_info(m_FormatCtx, NULL)<0
# else
av_find_stream_info(m_FormatCtx)<0
# endif
) {
m_error =i18n("Couldn't find stream information");
return false; // Couldn't find stream information
}
int i;
// Find the first video stream
m_videoStream=-1;
for (i=0; i<m_FormatCtx->nb_streams; i++)
if (m_FormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
m_videoStream=i;
break;
}
if (m_videoStream==-1) {
m_error=i18n("The file doesn't contain any video stream");
return false; // Didn't find a video stream
}
// Get a pointer to the codec context for the video stream
m_CodecCtx=m_FormatCtx->streams[m_videoStream]->codec;
// Find the decoder for the video stream
m_Codec=avcodec_find_decoder(m_CodecCtx->codec_id);
if (m_Codec==NULL) {
m_error=i18n("Unsupported codec");
return false; // Codec not found
}
// Open codec
if (
# if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(53, 8, 0)
avcodec_open2(m_CodecCtx, m_Codec, NULL)<0
# else
avcodec_open(m_CodecCtx, m_Codec)<0
# endif
) {
m_error =i18n("Could'nt open the codec");
return false; // Could not open codec
}
// Allocate video frame
m_Frame=avcodec_alloc_frame();
// Allocate an AVFrame structure
m_FrameRGB=avcodec_alloc_frame();
if (m_FrameRGB==NULL) {
m_error =i18n ("Unable to allocate memory for frames");
return false;
}
int numBytes;
// Determine required buffer size and allocate buffer
numBytes=avpicture_get_size(PIX_FMT_RGB24, m_CodecCtx->width,
m_CodecCtx->height);
m_buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)m_FrameRGB, m_buffer, PIX_FMT_RGB24,
m_CodecCtx->width, m_CodecCtx->height);
m_duration=(double)m_FormatCtx->duration / AV_TIME_BASE;
m_opened=true;
m_fileName=_fileName;
return true;
}
// Seeks the video stream to the key frame at or before _seconds
// (converted from seconds to the stream's own time base).
void k9AviDecode::seek(double _seconds) {
    AVRational time_base = m_FormatCtx->streams[m_videoStream]->time_base;
    int64_t fspos = (int64_t)(_seconds * AV_TIME_BASE);
    fspos=av_rescale_q(fspos, AV_TIME_BASE_Q, time_base);
    // NOTE(review): the return value is deliberately ignored, as in the
    // original; a failed seek simply leaves the position unchanged.
    // (The original also computed an unused av_gettime() timestamp and
    // stored the seek result in an unused local — dead code, removed.)
    av_seek_frame(m_FormatCtx, m_videoStream, fspos, AVSEEK_FLAG_BACKWARD );
}
// Seeks to _seconds, then decodes packets until the first complete video
// frame at or after the requested timestamp, converts it to RGB24 and
// hands it to SaveFrame() (which emits drawFrame with a TQImage).
void k9AviDecode::readFrame(double _seconds) {
    AVRational time_base = m_FormatCtx->streams[m_videoStream]->time_base;
    int64_t fspos = (int64_t)(_seconds * AV_TIME_BASE);
    fspos=av_rescale_q(fspos, AV_TIME_BASE_Q, time_base);
    av_seek_frame(m_FormatCtx, m_videoStream, fspos, AVSEEK_FLAG_BACKWARD );
    // Drop any decoder state left over from frames before the seek point.
    avcodec_flush_buffers(m_CodecCtx);
    int frameFinished=0;
    AVPacket packet;
#ifdef HAVE_SWSCALE
    struct SwsContext *toRGB_convert_ctx;
#endif
    bool bFound=false;
    // BUG FIX: test bFound BEFORE calling av_read_frame. The original
    // condition (av_read_frame(...)>=0 && !bFound) read one extra packet
    // after the target frame was found and exited without freeing it,
    // leaking a packet on every call.
    while (!bFound && av_read_frame(m_FormatCtx, &packet)>=0) {
        // Is this a packet from the video stream?
        if (packet.stream_index==m_videoStream) {
            // Decode video frame
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(52, 23, 0)
            avcodec_decode_video2(m_CodecCtx, m_Frame, &frameFinished, &packet);
#else
            avcodec_decode_video(m_CodecCtx, m_Frame, &frameFinished,
                                 packet.data, packet.size);
#endif
            // Did we get a complete video frame?
            if (frameFinished) {
                // Current decode timestamp; source depends on the
                // libavformat version available.
                int64_t cur_dts=fspos;
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(54, 2, 0)
                cur_dts= packet.dts;
#else
                if (m_FormatCtx->cur_st)
                    cur_dts= m_FormatCtx->cur_st->cur_dts;
#endif
                // Stop at the first frame at/after the requested position.
                if (cur_dts >=fspos) {
                    bFound=true;
#ifndef HAVE_SWSCALE
                    // Convert the image from its native format to RGB
                    img_convert((AVPicture *)m_FrameRGB, PIX_FMT_RGB24,
                                (AVPicture*)m_Frame, m_CodecCtx->pix_fmt,
                                m_CodecCtx->width, m_CodecCtx->height);
                    // convert frame to TQImage
                    SaveFrame(m_FrameRGB, m_CodecCtx->width,
                              m_CodecCtx->height);
#else
                    toRGB_convert_ctx=sws_getContext(m_CodecCtx->width, m_CodecCtx->height, m_CodecCtx->pix_fmt, m_CodecCtx->width, m_CodecCtx->height, PIX_FMT_RGB24, sws_flags,NULL,NULL,NULL);
                    sws_scale(toRGB_convert_ctx, m_Frame->data, m_Frame->linesize, 0, m_CodecCtx->height, m_FrameRGB->data,m_FrameRGB->linesize);
                    // convert frame to TQImage
                    SaveFrame(m_FrameRGB, m_CodecCtx->width,
                              m_CodecCtx->height);
                    sws_freeContext(toRGB_convert_ctx);
#endif
                }
            }
        }
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }
}
// Wraps a decoded RGB24 frame into an in-memory binary PPM (P6) image,
// loads it into a TQImage and emits drawFrame() with it.
// Assumes pFrame->data[0] holds 3*width*height contiguous bytes, which
// matches the avpicture_fill() layout set up in open().
void k9AviDecode::SaveFrame(AVFrame *pFrame, int width, int height) {
    TQImage pix;
    int len =(int) (3*width*height);
    char c[255];
    // Write the PPM header ("P6", dimensions, max channel value).
    sprintf(c,"P6\n%d %d\n255\n", width, height);
    // Compute the header length once (the original re-ran strlen four times).
    int hlen = (int)strlen(c);
    char *s= (char*) malloc(hlen+len);
    if (s==NULL)
        return; // out of memory: skip this frame rather than crash
    tc_memcpy(s,c,hlen);
    tc_memcpy(s+hlen,pFrame->data[0], len);
    pix.loadFromData((uchar*)s,hlen+len);
    free(s);
    emit drawFrame( &pix);
}
// Releases everything acquired by open(): the RGB buffer, both frames,
// the codec and the format context. Safe to call when nothing is open.
void k9AviDecode::close() {
    if (!m_opened)
        return;
    // Free the RGB conversion buffer and both frame structures.
    av_free(m_buffer);
    av_free(m_FrameRGB);
    av_free(m_Frame);
    // Shut the decoder down before closing the container.
    avcodec_close(m_CodecCtx);
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(53, 17, 0)
    avformat_close_input(&m_FormatCtx);
#else
    av_close_input_file(m_FormatCtx);
#endif
    m_opened=false;
}
// Returns the file duration in seconds, as computed in open() from
// the format context's duration. Only meaningful after a successful open().
double k9AviDecode::getDuration() const {
return m_duration;
}
// Returns true while a file is open (between a successful open() and close()).
bool k9AviDecode::opened() const {
return m_opened;
}
// Returns the path of the currently (or last successfully) opened file.
TQString k9AviDecode::getFileName() const {
return m_fileName;
}
// Returns the translated message for the last error, or an empty
// string when the last operation succeeded.
TQString k9AviDecode::getError() const {
return m_error;
}