Merge branch 'tmcqueen-materials-patch-1'

pull/3/head
Christian Beier 7 years ago
commit 5e6a0daed5
No known key found for this signature in database
GPG Key ID: 421BB3B45C6067F8

@@ -26,6 +26,7 @@ set(LIBVNCSRVEXAMPLE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/examples)
set(LIBVNCCLIEXAMPLE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/client_examples)
set(TESTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/test)
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/Modules/")
include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/libvncserver ${CMAKE_CURRENT_SOURCE_DIR}/common)
@@ -41,6 +42,7 @@ option(WITH_GNUTLS "Search for the GnuTLS secure communications library to suppo
option(WITH_OPENSSL "Search for the OpenSSL cryptography library to support encryption" ON)
option(WITH_SYSTEMD "Search for libsystemd to build with systemd socket activation support" ON)
option(WITH_GCRYPT "Search for libgcrypt to support additional authentication methods in LibVNCClient" ON)
option(WITH_FFMPEG "Search for FFMPEG to build an example VNC to MPEG encoder" ON)
option(WITH_TIGHTVNC_FILETRANSFER "Enable filetransfer if there is pthreads support" ON)
option(WITH_24BPP "Allow 24 bpp" ON)
option(WITH_IPv6 "Enable IPv6 Support" ON)
@@ -126,6 +128,10 @@ if(WITH_GCRYPT)
find_library(LIBGCRYPT_LIBRARIES gcrypt)
endif(WITH_GCRYPT)
if(WITH_FFMPEG)
find_package(FFMPEG)
endif(WITH_FFMPEG)
check_include_file("endian.h" LIBVNCSERVER_HAVE_ENDIAN_H)
check_include_file("fcntl.h" LIBVNCSERVER_HAVE_FCNTL_H)
@@ -457,12 +463,12 @@ if(SDL_FOUND)
set(SDLvncviewer_EXTRA_SOURCES scrap.c)
endif(SDL_FOUND)
if(HAVE_FFMPEG)
if(FFMPEG_FOUND)
set(LIBVNCCLIENT_EXAMPLES
${LIBVNCCLIENT_EXAMPLES}
vnc2mpg
)
endif(HAVE_FFMPEG)
endif(FFMPEG_FOUND)
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/examples)

@@ -3,6 +3,7 @@
* Simple movie writer for vnc; based on Libavformat API example from FFMPEG
*
* Copyright (c) 2003 Fabrice Bellard, 2004 Johannes E. Schindelin
* Updates copyright (c) 2017 Tyrel M. McQueen
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -25,412 +26,451 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <math.h>
#ifndef M_PI
#define M_PI 3.1415926535897931
#endif
#include "avformat.h"
#include <signal.h>
#include <sys/time.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <rfb/rfbclient.h>
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define VNC_PIX_FMT AV_PIX_FMT_RGB565 /* pixel format generated by VNC client */
#define OUTPUT_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
/**************************************************************/
/* video output */
static int write_packet(AVFormatContext *oc, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
/* rescale output packet timestamp values from codec to stream timebase */
av_packet_rescale_ts(pkt, *time_base, st->time_base);
pkt->stream_index = st->index;
/* Write the compressed frame to the media file. */
return av_interleaved_write_frame(oc, pkt);
}
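The helper above converts packet timestamps from the encoder's time base (1/sr, as set further down in add_video_stream()) to whatever time base the muxer chose for the stream. A rough worked example with purely illustrative numbers: if the encoder time base is 1/5 and the muxer's stream time base happened to be 1/90000, a packet with pts 12 (i.e. 2.4 s) would be rewritten to 12 * 18000 = 216000 before av_interleaved_write_frame() runs.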
AVFrame *picture, *tmp_picture;
uint8_t *video_outbuf;
int frame_count, video_outbuf_size;
/*************************************************/
/* video functions */
/* add a video output stream */
AVStream *add_video_stream(AVFormatContext *oc, int codec_id, int w, int h)
{
AVCodecContext *c;
/* a wrapper around a single output video stream */
typedef struct {
AVStream *st;
AVCodec *codec;
AVCodecContext *enc;
int64_t pts;
AVFrame *frame;
AVFrame *tmp_frame;
struct SwsContext *sws;
} VideoOutputStream;
/* Add an output video stream. */
int add_video_stream(VideoOutputStream *ost, AVFormatContext *oc,
enum AVCodecID codec_id, int64_t br, int sr, int w, int h)
{
int i;
st = av_new_stream(oc, 0);
if (!st) {
fprintf(stderr, "Could not alloc stream\n");
exit(1);
}
#if LIBAVFORMAT_BUILD<4629
c = &st->codec;
#else
c = st->codec;
#endif
c->codec_id = codec_id;
c->codec_type = CODEC_TYPE_VIDEO;
/* put sample parameters */
c->bit_rate = 800000;
/* resolution must be a multiple of two */
c->width = w;
c->height = h;
/* frames per second */
#if LIBAVCODEC_BUILD<4754
c->frame_rate = STREAM_FRAME_RATE;
c->frame_rate_base = 1;
#else
c->time_base.den = STREAM_FRAME_RATE;
c->time_base.num = 1;
c->pix_fmt = PIX_FMT_YUV420P;
#endif
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
/* just for testing, we also add B frames */
c->max_b_frames = 2;
/* find the encoder */
ost->codec = avcodec_find_encoder(codec_id);
if (!(ost->codec)) {
fprintf(stderr, "Could not find encoder for '%s'\n",
avcodec_get_name(codec_id));
return -1;
} // no extra memory allocation from this call
if (ost->codec->type != AVMEDIA_TYPE_VIDEO) {
fprintf(stderr, "Encoder for '%s' does not seem to be for video.\n",
avcodec_get_name(codec_id));
return -2;
}
if (c->codec_id == CODEC_ID_MPEG1VIDEO){
/* needed to avoid using macroblocks in which some coeffs overflow
this doesn't happen with normal video, it just happens here as the
motion of the chroma plane doesn't match the luma plane */
c->mb_decision=2;
ost->enc = avcodec_alloc_context3(ost->codec);
if (!(ost->enc)) {
fprintf(stderr, "Could not alloc an encoding context\n");
return -3;
} // from now on need to call avcodec_free_context(&(ost->enc)) on error
/* Set codec parameters */
ost->enc->codec_id = codec_id;
ost->enc->bit_rate = br;
/* Resolution must be a multiple of two (round up to avoid buffer overflow). */
ost->enc->width = w + (w % 2);
ost->enc->height = h + (h % 2);
/* timebase: This is the fundamental unit of time (in seconds) in terms
* of which frame timestamps are represented. For fixed-fps content,
* timebase should be 1/framerate and timestamp increments should be
* identical to 1. */
ost->enc->time_base = (AVRational){ 1, sr };
ost->enc->gop_size = 12; /* emit one intra frame every twelve frames at most */
ost->enc->pix_fmt = OUTPUT_PIX_FMT;
if (ost->enc->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
/* Needed to avoid using macroblocks in which some coeffs overflow.
* This does not happen with normal video, it just happens here as
* the motion of the chroma plane does not match the luma plane. */
ost->enc->mb_decision = 2;
}
/* some formats want stream headers to be separate */
if(!strcmp(oc->oformat->name, "mp4") || !strcmp(oc->oformat->name, "mov") || !strcmp(oc->oformat->name, "3gp"))
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
return st;
ost->st = avformat_new_stream(oc, ost->codec);
if (!ost->st) {
fprintf(stderr, "Could not allocate stream\n");
avcodec_free_context(&(ost->enc));
return -4;
} // stream memory cleared up when oc is freed, so no need to do so later in this function on error
ost->st->id = oc->nb_streams-1;
ost->st->time_base = ost->enc->time_base;
ost->pts = 0;
/* Some formats want stream headers to be separate. */
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
ost->enc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
// must wait to allocate frame buffers until codec is opened (in case codec changes the PIX_FMT)
return 0;
}
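A minimal usage sketch of the stream setup above, not part of the patch itself (the AVFormatContext and the size/bitrate values are illustrative; movie_open() further down performs the same steps with full error handling):

VideoOutputStream vs = { 0 };   /* must start zeroed */
/* oc: an AVFormatContext obtained from avformat_alloc_output_context2() */
if (add_video_stream(&vs, oc, oc->oformat->video_codec,
                     1000000 /* bit rate */, 5 /* frame rate */, 640, 480) < 0) {
    /* per the comments above, nothing remains allocated for the caller to free */
}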
AVFrame *alloc_picture(int pix_fmt, int width, int height)
AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
AVFrame *picture;
uint8_t *picture_buf;
int size;
picture = avcodec_alloc_frame();
int ret;
picture = av_frame_alloc();
if (!picture)
return NULL;
size = avpicture_get_size(pix_fmt, width, height);
picture_buf = malloc(size);
if (!picture_buf) {
av_free(picture);
// from now on need to call av_frame_free(&picture) on error
picture->format = pix_fmt;
picture->width = width;
picture->height = height;
/* allocate the buffers for the frame data */
ret = av_frame_get_buffer(picture, 64);
if (ret < 0) {
fprintf(stderr, "Could not allocate frame data.\n");
av_frame_free(&picture);
return NULL;
}
avpicture_fill((AVPicture *)picture, picture_buf,
pix_fmt, width, height);
return picture;
}
} // use av_frame_free(&picture) to free memory from this call
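For reference, a hedged usage sketch of the allocator above (size and pixel format are illustrative):

AVFrame *f = alloc_picture(AV_PIX_FMT_YUV420P, 640, 480);
if (f) {
    /* ... write into f->data[] / f->linesize[] ... */
    av_frame_free(&f);   /* frees the frame and the buffers av_frame_get_buffer() attached */
}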
void open_video(AVFormatContext *oc, AVStream *st)
int open_video(AVFormatContext *oc, VideoOutputStream *ost)
{
AVCodec *codec;
AVCodecContext *c;
#if LIBAVFORMAT_BUILD<4629
c = &st->codec;
#else
c = st->codec;
#endif
/* find the video encoder */
codec = avcodec_find_encoder(c->codec_id);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
int ret;
/* open the codec */
if (avcodec_open(c, codec) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
ret = avcodec_open2(ost->enc, ost->codec, NULL);
if (ret < 0) {
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
return ret;
} // memory from this call freed when oc is freed, no need to do it on error in this call
/* copy the stream parameters to the muxer */
ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc);
if (ret < 0) {
fprintf(stderr, "Could not copy the stream parameters.\n");
return ret;
} // memory from this call is freed when oc (parent of ost->st) is freed, no need to do it on error in this call
/* allocate and init a re-usable frame */
ost->frame = alloc_picture(ost->enc->pix_fmt, ost->enc->width, ost->enc->height);
if (!(ost->frame)) {
fprintf(stderr, "Could not allocate video frame\n");
return -1;
} // from now on need to call av_frame_free(&(ost->frame)) on error
/* If the output format is not the same as the VNC format, then a temporary VNC format
* picture is needed too. It is then converted to the required
* output format. */
ost->tmp_frame = NULL;
ost->sws = NULL;
if (ost->enc->pix_fmt != VNC_PIX_FMT) {
ost->tmp_frame = alloc_picture(VNC_PIX_FMT, ost->enc->width, ost->enc->height);
if (!(ost->tmp_frame)) {
fprintf(stderr, "Could not allocate temporary picture\n");
av_frame_free(&(ost->frame));
return -2;
} // from now on need to call av_frame_free(&(ost->tmp_frame)) on error
ost->sws = sws_getCachedContext(ost->sws, ost->enc->width, ost->enc->height, VNC_PIX_FMT, ost->enc->width, ost->enc->height, ost->enc->pix_fmt, 0, NULL, NULL, NULL);
if (!(ost->sws)) {
fprintf(stderr, "Could not get sws context\n");
av_frame_free(&(ost->frame));
av_frame_free(&(ost->tmp_frame));
return -3;
} // from now on need to call sws_freeContext(ost->sws); ost->sws = NULL; on error
}
video_outbuf = NULL;
if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
/* allocate output buffer */
/* XXX: API change will be done */
video_outbuf_size = 200000;
video_outbuf = malloc(video_outbuf_size);
}
return 0;
}
/* allocate the encoded raw picture */
picture = alloc_picture(c->pix_fmt, c->width, c->height);
if (!picture) {
fprintf(stderr, "Could not allocate picture\n");
exit(1);
/*
* encode current video frame and send it to the muxer
* return 0 on success, negative on error
*/
int write_video_frame(AVFormatContext *oc, VideoOutputStream *ost, int64_t pts)
{
int ret, ret2;
AVPacket pkt = { 0 };
if (pts <= ost->pts) return 0; // nothing to do
/* convert format if needed */
if (ost->tmp_frame) {
sws_scale(ost->sws, (const uint8_t * const *)ost->tmp_frame->data,
ost->tmp_frame->linesize, 0, ost->enc->height, ost->frame->data, ost->frame->linesize);
}
/* if the output format is not RGB565, then a temporary RGB565
picture is needed too. It is then converted to the required
output format */
tmp_picture = NULL;
if (c->pix_fmt != PIX_FMT_RGB565) {
tmp_picture = alloc_picture(PIX_FMT_RGB565, c->width, c->height);
if (!tmp_picture) {
fprintf(stderr, "Could not allocate temporary picture\n");
exit(1);
/* send the image to the encoder */
ost->pts = pts;
ost->frame->pts = ost->pts;
ret = avcodec_send_frame(ost->enc, ost->frame);
if (ret < 0) {
fprintf(stderr, "Error sending video frame to encoder: %s\n", av_err2str(ret));
return ret;
}
/* read all available packets */
ret2 = 0;
for (ret = avcodec_receive_packet(ost->enc, &pkt); ret == 0; ret = avcodec_receive_packet(ost->enc, &pkt)) {
ret2 = write_packet(oc, &(ost->enc->time_base), ost->st, &pkt);
if (ret2 < 0) {
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret2));
/* continue on this error to not gum up encoder */
}
}
if (ret2 < 0) return ret2;
if (!(ret == AVERROR(EAGAIN))) return ret; // if AVERROR(EAGAIN), means all available packets output, need more frames (i.e. success)
return 0;
}
void write_video_frame(AVFormatContext *oc, AVStream *st)
/*
* Write final video frame (i.e. drain codec).
*/
int write_final_video_frame(AVFormatContext *oc, VideoOutputStream *ost)
{
int out_size, ret;
AVCodecContext *c;
AVFrame *picture_ptr;
#if LIBAVFORMAT_BUILD<4629
c = &st->codec;
#else
c = st->codec;
#endif
if (c->pix_fmt != PIX_FMT_RGB565) {
/* as we only generate a RGB565 picture, we must convert it
to the codec pixel format if needed */
img_convert((AVPicture *)picture, c->pix_fmt,
(AVPicture *)tmp_picture, PIX_FMT_RGB565,
c->width, c->height);
}
picture_ptr = picture;
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
/* raw video case. The API will change slightly in the near
futur for that */
AVPacket pkt;
av_init_packet(&pkt);
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= st->index;
pkt.data= (uint8_t *)picture_ptr;
pkt.size= sizeof(AVPicture);
ret = av_write_frame(oc, &pkt);
} else {
/* encode the image */
out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture_ptr);
/* if zero size, it means the image was buffered */
if (out_size != 0) {
AVPacket pkt;
av_init_packet(&pkt);
pkt.pts= c->coded_frame->pts;
if(c->coded_frame->key_frame)
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= st->index;
pkt.data= video_outbuf;
pkt.size= out_size;
/* write the compressed frame in the media file */
ret = av_write_frame(oc, &pkt);
} else {
ret = 0;
}
int ret, ret2;
AVPacket pkt = { 0 };
/* send NULL image to encoder */
ret = avcodec_send_frame(ost->enc, NULL);
if (ret < 0) {
fprintf(stderr, "Error sending final video frame to encoder: %s\n", av_err2str(ret));
return ret;
}
if (ret != 0) {
fprintf(stderr, "Error while writing video frame\n");
exit(1);
/* read all available packets */
ret2 = 0;
for (ret = avcodec_receive_packet(ost->enc, &pkt); ret == 0; ret = avcodec_receive_packet(ost->enc, &pkt)) {
ret2 = write_packet(oc, &(ost->enc->time_base), ost->st, &pkt);
if (ret2 < 0) {
fprintf(stderr, "Error while writing final video frame: %s\n", av_err2str(ret2));
/* continue on this error to not gum up encoder */
}
}
frame_count++;
if (ret2 < 0) return ret2;
if (!(ret == AVERROR_EOF)) return ret;
return 0;
}
void close_video(AVFormatContext *oc, AVStream *st)
void close_video_stream(VideoOutputStream *ost)
{
avcodec_close(st->codec);
av_free(picture->data[0]);
av_free(picture);
if (tmp_picture) {
av_free(tmp_picture->data[0]);
av_free(tmp_picture);
}
av_free(video_outbuf);
avcodec_free_context(&(ost->enc));
av_frame_free(&(ost->frame));
av_frame_free(&(ost->tmp_frame));
sws_freeContext(ost->sws); ost->sws = NULL;
ost->codec = NULL; /* codec not an allocated item */
ost->st = NULL; /* freeing parent oc will free this memory */
}
static const char *filename;
static AVOutputFormat *fmt;
static AVFormatContext *oc;
static AVStream *video_st;
static double video_pts;
static int movie_open(int w, int h) {
if (fmt->video_codec != CODEC_ID_NONE) {
video_st = add_video_stream(oc, fmt->video_codec, w, h);
} else
return 1;
/* set the output parameters (must be done even if no
parameters). */
if (av_set_parameters(oc, NULL) < 0) {
fprintf(stderr, "Invalid output format parameters\n");
return 2;
/**************************************************************/
/* Output movie handling */
AVFormatContext *movie_open(char *filename, VideoOutputStream *video_st, int br, int fr, int w, int h) {
int ret;
AVFormatContext *oc;
/* allocate the output media context. */
ret = avformat_alloc_output_context2(&oc, NULL, NULL, filename);
if (ret < 0) {
fprintf(stderr, "Warning: Could not deduce output format from file extension: using MP4.\n");
ret = avformat_alloc_output_context2(&oc, NULL, "mp4", filename);
}
if (ret < 0) {
fprintf(stderr, "Error: Could not allocate media context: %s.\n", av_err2str(ret));
return NULL;
} // from now on, need to call avformat_free_context(oc); oc=NULL; to free memory on error
dump_format(oc, 0, filename, 1);
/* now that all the parameters are set, we can open the audio and
video codecs and allocate the necessary encode buffers */
if (video_st)
open_video(oc, video_st);
/* Add the video stream using the default format codec and initialize the codec. */
if (oc->oformat->video_codec != AV_CODEC_ID_NONE) {
ret = add_video_stream(video_st, oc, oc->oformat->video_codec, br, fr, w, h);
} else {
ret = -1;
}
if (ret < 0) {
fprintf(stderr, "Error: chosen output format does not have a video codec, or error %i\n", ret);
avformat_free_context(oc); oc = NULL;
return NULL;
} // from now on, need to call close_video_stream(video_st) to free memory on error
/* Now that all the parameters are set, we can open the codecs and allocate the necessary encode buffers. */
ret = open_video(oc, video_st);
if (ret < 0) {
fprintf(stderr, "Error: error opening video codec, error %i\n", ret);
close_video_stream(video_st);
avformat_free_context(oc); oc = NULL;
return NULL;
} // no additional calls required to free memory, as close_video_stream(video_st) will do it
/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
fprintf(stderr, "Could not open '%s'\n", filename);
return 3;
if (!(oc->oformat->flags & AVFMT_NOFILE)) {
ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open '%s': %s\n", filename,
av_err2str(ret));
close_video_stream(video_st);
avformat_free_context(oc); oc = NULL;
return NULL;
}
}
/* write the stream header, if any */
av_write_header(oc);
return 0;
} // will need to call avio_closep(&oc->pb) to free file handle on error
/* Write the stream header, if any. */
ret = avformat_write_header(oc, NULL);
if (ret < 0) {
fprintf(stderr, "Error occurred when writing to output file: %s\n",
av_err2str(ret));
if (!(oc->oformat->flags & AVFMT_NOFILE))
avio_closep(&oc->pb);
close_video_stream(video_st);
avformat_free_context(oc); oc = NULL;
} // no additional items to free
return oc;
}
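Putting the helpers together, the intended lifecycle looks roughly like the sketch below (it mirrors what vnc_malloc_fb() and the main loop further down actually do; file name, bit rate, frame rate and size are illustrative):

VideoOutputStream vs = { 0 };
AVFormatContext *ctx = movie_open("out.mp4", &vs, 1000000 /* bps */, 5 /* fps */, 640, 480);
if (ctx) {
    int64_t next_pts = 1;                  /* normally produced by time_to_pts() below */
    write_video_frame(ctx, &vs, next_pts); /* called repeatedly with increasing pts */
    movie_close(&ctx, &vs);                /* drains the encoder, writes the trailer, frees everything */
}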
static int movie_close() {
int i;
void movie_close(AVFormatContext **ocp, VideoOutputStream *video_st) {
AVFormatContext *oc = *ocp;
/* Write the trailer, if any. The trailer must be written before you
* close the CodecContexts open when you wrote the header; otherwise
* av_write_trailer() may try to use memory that was freed on
* av_codec_close(). */
if (oc) {
if (video_st)
write_final_video_frame(oc, video_st);
/* close each codec */
close_video(oc, video_st);
av_write_trailer(oc);
/* write the trailer, if any */
av_write_trailer(oc);
/* Close the video codec. */
close_video_stream(video_st);
/* free the streams */
for(i = 0; i < oc->nb_streams; i++) {
av_freep(&oc->streams[i]);
}
if (!(oc->oformat->flags & AVFMT_NOFILE))
/* Close the output file. */
avio_closep(&oc->pb);
if (!(fmt->flags & AVFMT_NOFILE)) {
/* close the output file */
url_fclose(&oc->pb);
/* free the stream */
avformat_free_context(oc);
*ocp = NULL;
}
}
/* free the stream */
av_free(oc);
/**************************************************************/
/* VNC globals */
VideoOutputStream video_st = { 0 };
rfbClient *client = NULL;
rfbBool quit = FALSE;
char *filename = NULL;
AVFormatContext *oc = NULL;
int bitrate = 1000000;
int framerate = 5;
long max_time = 0;
struct timespec start_time, cur_time;
/* Signal handling */
void signal_handler(int signal) {
quit=TRUE;
}
static rfbBool quit=FALSE;
static void signal_handler(int signal) {
fprintf(stderr,"Cleaning up.\n");
quit=TRUE;
/* returns time since start in pts units */
int64_t time_to_pts(int framerate, struct timespec *start_time, struct timespec *cur_time) {
time_t ds = cur_time->tv_sec - start_time->tv_sec;
long dns = cur_time->tv_nsec - start_time->tv_nsec;
/* use usecs */
int64_t dt = (int64_t)ds*(int64_t)1000000+(int64_t)dns/(int64_t)1000;
/* compute rv in units of frame number (rounding to nearest, not truncating) */
int64_t rv = (((int64_t)framerate)*dt + (int64_t)500000) / (int64_t)(1000000);
return rv;
}
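As a quick sanity check on the conversion above (illustrative numbers): with framerate = 5 and an elapsed time of 2.3 s, dt = 2,300,000 us, so rv = (5 * 2300000 + 500000) / 1000000 = 12, i.e. the frame index nearest to 2.3 s at 5 frames per second.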
/**************************************************************/
/* VNC callback functions */
static rfbBool resize(rfbClient* client) {
static rfbBool first=TRUE;
if(!first) {
movie_close();
perror("I don't know yet how to change resolutions!\n");
}
movie_open(client->width, client->height);
signal(SIGINT,signal_handler);
if(tmp_picture)
client->frameBuffer=tmp_picture->data[0];
else
client->frameBuffer=picture->data[0];
return TRUE;
rfbBool vnc_malloc_fb(rfbClient* client) {
movie_close(&oc, &video_st);
oc = movie_open(filename, &video_st, bitrate, framerate, client->width, client->height);
if (!oc)
return FALSE;
signal(SIGINT,signal_handler);
signal(SIGTERM,signal_handler);
signal(SIGQUIT,signal_handler);
signal(SIGABRT,signal_handler);
/* These assignments assume the AVFrame buffer is contiguous. This is true in current ffmpeg versions for
* most non-HW accelerated bits, but may not be true globally. */
if(video_st.tmp_frame)
client->frameBuffer=video_st.tmp_frame->data[0];
else
client->frameBuffer=video_st.frame->data[0];
return TRUE;
}
static void update(rfbClient* client,int x,int y,int w,int h) {
void vnc_update(rfbClient* client,int x,int y,int w,int h) {
}
/**************************************************************/
/* media file output */
int main(int argc, char **argv)
{
time_t stop=0;
rfbClient* client;
int i,j;
/* get a vnc client structure (don't connect yet). */
/* Initialize vnc client structure (don't connect yet). */
client = rfbGetClient(5,3,2);
client->format.redShift=11; client->format.redMax=31;
client->format.greenShift=5; client->format.greenMax=63;
client->format.blueShift=0; client->format.blueMax=31;
/* initialize libavcodec, and register all codecs and formats */
/* Initialize libavcodec, and register all codecs and formats. */
av_register_all();
if(!strncmp(argv[argc-1],":",1) ||
!strncmp(argv[argc-1],"127.0.0.1",9) ||
!strncmp(argv[argc-1],"localhost",9))
client->appData.encodingsString="raw";
filename=0;
/* Parse command line. */
for(i=1;i<argc;i++) {
j=i;
if(argc>i+1 && !strcmp("-o",argv[i])) {
filename=argv[2];
j+=2;
} else if(argc>i+1 && !strcmp("-t",argv[i])) {
stop=time(0)+atoi(argv[i+1]);
j+=2;
}
if(j>i) {
argc-=j-i;
memmove(argv+i,argv+j,(argc-i)*sizeof(char*));
i--;
}
j=i;
if(argc>i+1 && !strcmp("-o",argv[i])) {
filename=argv[i+1];
j+=2;
} else if(argc>i+1 && !strcmp("-t",argv[i])) {
max_time=atol(argv[i+1]);
if (max_time < 10 || max_time > 100000000) {
fprintf(stderr, "Warning: Nonsensical time-per-file %li, resetting to default.\n", max_time);
max_time = 0;
}
j+=2;
}
/* This is so that argc/argv are ready for passing to rfbInitClient */
if(j>i) {
argc-=j-i;
memmove(argv+i,argv+j,(argc-i)*sizeof(char*));
i--;
}
}
/* auto detect the output format from the name. default is
mpeg. */
fmt = filename?guess_format(NULL, filename, NULL):0;
if (!fmt) {
printf("Could not deduce output format from file extension: using MPEG.\n");
fmt = guess_format("mpeg", NULL, NULL);
/* default filename. */
if (!filename) {
fprintf(stderr, "Warning: No filename specified. Using output.mp4\n");
filename = "output.mp4";
}
if (!fmt) {
fprintf(stderr, "Could not find suitable output format\n");
exit(1);
}
/* allocate the output media context */
oc = av_alloc_format_context();
if (!oc) {
fprintf(stderr, "Memory error\n");
exit(1);
}
oc->oformat = fmt;
snprintf(oc->filename, sizeof(oc->filename), "%s", filename);
/* add the audio and video streams using the default format codecs
and initialize the codecs */
video_st = NULL;
/* open VNC connection */
client->MallocFrameBuffer=resize;
client->GotFrameBufferUpdate=update;
/* open VNC connection. */
client->MallocFrameBuffer=vnc_malloc_fb;
client->GotFrameBufferUpdate=vnc_update;
if(!rfbInitClient(client,&argc,argv)) {
printf("usage: %s [-o output_file] [-t seconds] server:port\n"
"Shoot a movie from a VNC server.\n", argv[0]);
exit(1);
printf("usage: %s [-o output_file] [-t seconds-per-file] server:port\n", argv[0]);
return 1;
}
if(client->serverPort==-1)
client->vncRec->doNotSleep = TRUE; /* vncrec playback */
/* main loop */
/* main loop */
clock_gettime(CLOCK_MONOTONIC, &start_time);
while(!quit) {
int i=WaitForMessage(client,1000000/STREAM_FRAME_RATE);
if(i<0) {
movie_close();
return 0;
}
if(i)
if(!HandleRFBServerMessage(client))
quit=TRUE;
else {
/* compute current audio and video time */
video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
/* write interleaved audio and video frames */
write_video_frame(oc, video_st);
int i=WaitForMessage(client,10000/framerate); /* useful for timeout to be no more than 10 msec per second (=10000/framerate usec) */
if (i>0) {
if(!HandleRFBServerMessage(client))
quit=TRUE;
} else if (i<0) {
quit=TRUE;
}
if(stop!=0 && stop<time(0))
quit=TRUE;
if (!quit) {
clock_gettime(CLOCK_MONOTONIC, &cur_time);
write_video_frame(oc, &video_st, time_to_pts(framerate, &start_time, &cur_time));
if ((cur_time.tv_sec - start_time.tv_sec) > max_time && max_time > 0) {
quit = TRUE;
}
}
}
movie_close();
movie_close(&oc,&video_st);
return 0;
}
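Once built, a capture might be started with something along the lines of vnc2mpg -o session.mp4 -t 3600 somehost:1 (host and values are illustrative): -o names the output file (default output.mp4), -t limits recording to the given number of seconds, and the final argument is the VNC server address as accepted by libvncclient.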

@@ -0,0 +1,227 @@
#.rst:
# FindFFMPEG
# ----------
#
# Find the native FFMPEG includes and library
#
# This module defines::
#
# FFMPEG_INCLUDE_DIR, where to find avcodec.h, avformat.h ...
# FFMPEG_LIBRARIES, the libraries to link against to use FFMPEG.
# FFMPEG_FOUND, If false, do not try to use FFMPEG.
#
# also defined, but not for general use are::
#
# FFMPEG_avformat_LIBRARY, where to find the FFMPEG avformat library.
# FFMPEG_avcodec_LIBRARY, where to find the FFMPEG avcodec library.
#
# This is useful to do it this way so that we can always add more libraries
# if needed to ``FFMPEG_LIBRARIES`` if ffmpeg ever changes...
#=============================================================================
# Copyright: 1993-2008 Ken Martin, Will Schroeder, Bill Lorensen
#
# Distributed under the OSI-approved BSD License (the "License");
# see accompanying file Copyright.txt for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the License for more information.
#=============================================================================
# (To distribute this file outside of YCM, substitute the full
# License text for the above reference.)
# Originally from VTK project
find_path(FFMPEG_INCLUDE_DIR1 avformat.h
$ENV{FFMPEG_DIR}
$ENV{FFMPEG_DIR}/ffmpeg
$ENV{FFMPEG_DIR}/libavformat
$ENV{FFMPEG_DIR}/include/libavformat
$ENV{FFMPEG_DIR}/include/ffmpeg
/usr/local/include/ffmpeg
/usr/include/ffmpeg
/usr/include/libavformat
/usr/include/ffmpeg/libavformat
/usr/include/${CMAKE_LIBRARY_ARCHITECTURE}/libavformat
/usr/local/include/libavformat
)
find_path(FFMPEG_INCLUDE_DIR2 avutil.h
$ENV{FFMPEG_DIR}
$ENV{FFMPEG_DIR}/ffmpeg
$ENV{FFMPEG_DIR}/libavutil
$ENV{FFMPEG_DIR}/include/libavutil
$ENV{FFMPEG_DIR}/include/ffmpeg
/usr/local/include/ffmpeg
/usr/include/ffmpeg
/usr/include/libavutil
/usr/include/ffmpeg/libavutil
/usr/include/${CMAKE_LIBRARY_ARCHITECTURE}/libavutil
/usr/local/include/libavutil
)
find_path(FFMPEG_INCLUDE_DIR3 avcodec.h
$ENV{FFMPEG_DIR}
$ENV{FFMPEG_DIR}/ffmpeg
$ENV{FFMPEG_DIR}/libavcodec
$ENV{FFMPEG_DIR}/include/libavcodec
$ENV{FFMPEG_DIR}/include/ffmpeg
/usr/local/include/ffmpeg
/usr/include/ffmpeg
/usr/include/libavcodec
/usr/include/ffmpeg/libavcodec
/usr/include/${CMAKE_LIBRARY_ARCHITECTURE}/libavcodec
/usr/local/include/libavcodec
)
find_path(FFMPEG_INCLUDE_DIR4 swscale.h
$ENV{FFMPEG_DIR}
$ENV{FFMPEG_DIR}/ffmpeg
$ENV{FFMPEG_DIR}/libswscale
$ENV{FFMPEG_DIR}/include/libswscale
$ENV{FFMPEG_DIR}/include/ffmpeg
/usr/local/include/ffmpeg
/usr/include/ffmpeg
/usr/include/libswscale
/usr/include/ffmpeg/libswscale
/usr/include/${CMAKE_LIBRARY_ARCHITECTURE}/libswscale
/usr/local/include/libswscale
)
find_path(FFMPEG_INCLUDE_DIR5 avdevice.h
$ENV{FFMPEG_DIR}
$ENV{FFMPEG_DIR}/ffmpeg
$ENV{FFMPEG_DIR}/libavdevice
$ENV{FFMPEG_DIR}/include/libavdevice
$ENV{FFMPEG_DIR}/include/ffmpeg
/usr/local/include/ffmpeg
/usr/include/ffmpeg
/usr/include/libavdevice
/usr/include/ffmpeg/libavdevice
/usr/include/${CMAKE_LIBRARY_ARCHITECTURE}/libavdevice
/usr/local/include/libavdevice
)
if(FFMPEG_INCLUDE_DIR1)
if(FFMPEG_INCLUDE_DIR2)
if(FFMPEG_INCLUDE_DIR3)
set(FFMPEG_INCLUDE_DIR ${FFMPEG_INCLUDE_DIR1}
${FFMPEG_INCLUDE_DIR2}
${FFMPEG_INCLUDE_DIR3})
endif()
endif()
endif()
if(FFMPEG_INCLUDE_DIR4)
set(FFMPEG_INCLUDE_DIR ${FFMPEG_INCLUDE_DIR}
${FFMPEG_INCLUDE_DIR4})
endif()
if(FFMPEG_INCLUDE_DIR5)
set(FFMPEG_INCLUDE_DIR ${FFMPEG_INCLUDE_DIR}
${FFMPEG_INCLUDE_DIR5}
${FFMPEG_INCLUDE_DIR5}/..)
endif()
find_library(FFMPEG_avformat_LIBRARY avformat
$ENV{FFMPEG_DIR}
$ENV{FFMPEG_DIR}/lib
$ENV{FFMPEG_DIR}/libavformat
/usr/local/lib
/usr/lib
)
find_library(FFMPEG_avcodec_LIBRARY avcodec
$ENV{FFMPEG_DIR}
$ENV{FFMPEG_DIR}/lib
$ENV{FFMPEG_DIR}/libavcodec
/usr/local/lib
/usr/lib
)
find_library(FFMPEG_avutil_LIBRARY avutil
$ENV{FFMPEG_DIR}
$ENV{FFMPEG_DIR}/lib
$ENV{FFMPEG_DIR}/libavutil
/usr/local/lib
/usr/lib
)
if(NOT DISABLE_SWSCALE)
find_library(FFMPEG_swscale_LIBRARY swscale
$ENV{FFMPEG_DIR}
$ENV{FFMPEG_DIR}/lib
$ENV{FFMPEG_DIR}/libswscale
/usr/local/lib
/usr/lib
)
endif(NOT DISABLE_SWSCALE)
find_library(FFMPEG_avdevice_LIBRARY avdevice
$ENV{FFMPEG_DIR}
$ENV{FFMPEG_DIR}/lib
$ENV{FFMPEG_DIR}/libavdevice
/usr/local/lib
/usr/lib
)
find_library(_FFMPEG_z_LIBRARY_ z
$ENV{FFMPEG_DIR}
$ENV{FFMPEG_DIR}/lib
/usr/local/lib
/usr/lib
)
if(FFMPEG_INCLUDE_DIR)
if(FFMPEG_avformat_LIBRARY)
if(FFMPEG_avcodec_LIBRARY)
if(FFMPEG_avutil_LIBRARY)
set(FFMPEG_FOUND "YES")
set(FFMPEG_LIBRARIES ${FFMPEG_avformat_LIBRARY}
${FFMPEG_avcodec_LIBRARY}
${FFMPEG_avutil_LIBRARY}
)
if(FFMPEG_swscale_LIBRARY)
set(FFMPEG_LIBRARIES ${FFMPEG_LIBRARIES}
${FFMPEG_swscale_LIBRARY}
)
endif()
if(FFMPEG_avdevice_LIBRARY)
set(FFMPEG_LIBRARIES ${FFMPEG_LIBRARIES}
${FFMPEG_avdevice_LIBRARY}
)
endif()
if(_FFMPEG_z_LIBRARY_)
set( FFMPEG_LIBRARIES ${FFMPEG_LIBRARIES}
${_FFMPEG_z_LIBRARY_}
)
endif()
endif()
endif()
endif()
endif()
mark_as_advanced(
FFMPEG_INCLUDE_DIR
FFMPEG_INCLUDE_DIR1
FFMPEG_INCLUDE_DIR2
FFMPEG_INCLUDE_DIR3
FFMPEG_INCLUDE_DIR4
FFMPEG_INCLUDE_DIR5
FFMPEG_avformat_LIBRARY
FFMPEG_avcodec_LIBRARY
FFMPEG_avutil_LIBRARY
FFMPEG_swscale_LIBRARY
FFMPEG_avdevice_LIBRARY
_FFMPEG_z_LIBRARY_
)
# Set package properties if FeatureSummary was included
if(COMMAND set_package_properties)
set_package_properties(FFMPEG PROPERTIES DESCRIPTION "A complete, cross-platform solution to record, convert and stream audio and video")
set_package_properties(FFMPEG PROPERTIES URL "http://ffmpeg.org/")
endif()