LibVNCClient: Add H.264 encoding for framebuffer updates

This patch implements support in LibVNCClient for framebuffer updates
encoded as H.264 frames. Hardware accelerated decoding is performed
using VA API.

This is experimental support to let the community explore the possibilities
offered by the potential bandwidth and latency reductions that H.264 encoding
allows. This may be particularly useful for use cases such as online gaming,
hosted desktops, hosted set-top boxes, and similar scenarios.

This patch only provides the client side support and is meant to be used
with corresponding server-side support, as provided by an upcoming patch for
qemu ui/vnc module (to view the display of a virtual machine executing under
QEMU).

With this H.264-based encoding, if multiple framebuffer update messages
are generated for a single server framebuffer modification, the H.264
frame data is sent only with the first update message. Subsequent update
framebuffer messages will contain only the coordinates and size of the
additional updated regions.

Instructions/Requirements:
* The patch should be applied on top of the previous patch I submitted with
minor enhancements to the gtkvncviewer application:
http://sourceforge.net/mailarchive/message.php?msg_id=30323804
* Currently only works with libva 1.0: use branch "v1.0-branch" for libva and
intel-driver. Those can be built as follows:
   cd libva
   git checkout v1.0-branch
   ./autogen.sh
   make
   sudo make install
   cd ..
   git clone git://anongit.freedesktop.org/vaapi/intel-driver
   cd intel-driver
   git checkout v1.0-branch
   ./autogen.sh
   make
   sudo make install

Signed-off-by: David Verbeiren <david.verbeiren@intel.com>
pull/1/head
David Verbeiren 11 years ago committed by Christian Beier
parent 98d49517ed
commit d891478ec9

@ -23,6 +23,10 @@
#include <gdk/gdkkeysyms.h> #include <gdk/gdkkeysyms.h>
#include <rfb/rfbclient.h> #include <rfb/rfbclient.h>
#ifdef LIBVNCSERVER_CONFIG_LIBVA
#include <gdk/gdkx.h>
#endif
static rfbClient *cl; static rfbClient *cl;
static gchar *server_cut_text = NULL; static gchar *server_cut_text = NULL;
static gboolean framebuffer_allocated = FALSE; static gboolean framebuffer_allocated = FALSE;
@ -57,6 +61,14 @@ static gboolean expose_event (GtkWidget *widget,
cl->format.greenMax = (1 << image->visual->green_prec) - 1; cl->format.greenMax = (1 << image->visual->green_prec) - 1;
cl->format.blueMax = (1 << image->visual->blue_prec) - 1; cl->format.blueMax = (1 << image->visual->blue_prec) - 1;
#ifdef LIBVNCSERVER_CONFIG_LIBVA
/* Allow libvncclient to use a more efficient way
* of putting the framebuffer on the screen when
* using the H.264 format.
*/
cl->outputWindow = GDK_WINDOW_XID(widget->window);
#endif
SetFormatAndEncodings (cl); SetFormatAndEncodings (cl);
framebuffer_allocated = TRUE; framebuffer_allocated = TRUE;
@ -67,12 +79,14 @@ static gboolean expose_event (GtkWidget *widget,
gdk_cursor_unref( cur ); gdk_cursor_unref( cur );
} }
#ifndef LIBVNCSERVER_CONFIG_LIBVA
gdk_draw_image (GDK_DRAWABLE (widget->window), gdk_draw_image (GDK_DRAWABLE (widget->window),
widget->style->fg_gc[gtk_widget_get_state(widget)], widget->style->fg_gc[gtk_widget_get_state(widget)],
image, image,
event->area.x, event->area.y, event->area.x, event->area.y,
event->area.x, event->area.y, event->area.x, event->area.y,
event->area.width, event->area.height); event->area.width, event->area.height);
#endif
return FALSE; return FALSE;
} }
@ -462,10 +476,12 @@ static void update (rfbClient *cl, int x, int y, int w, int h) {
dialog_connecting = NULL; dialog_connecting = NULL;
} }
#ifndef LIBVNCSERVER_CONFIG_LIBVA
GtkWidget *drawing_area = rfbClientGetClientData (cl, gtk_init); GtkWidget *drawing_area = rfbClientGetClientData (cl, gtk_init);
if (drawing_area != NULL) if (drawing_area != NULL)
gtk_widget_queue_draw_area (drawing_area, x, y, w, h); gtk_widget_queue_draw_area (drawing_area, x, y, w, h);
#endif
} }
static void kbd_leds (rfbClient *cl, int value, int pad) { static void kbd_leds (rfbClient *cl, int value, int pad) {

@ -151,6 +151,20 @@ HAVE_X11="false"
AC_PATH_XTRA AC_PATH_XTRA
AH_TEMPLATE(HAVE_X11, [X11 build environment present]) AH_TEMPLATE(HAVE_X11, [X11 build environment present])
# See if we want libva support
# TODO: check if library actually exists
AH_TEMPLATE(CONFIG_LIBVA, [Build libva support])
AC_ARG_WITH(libva,
[ --with-libva build libva support],,)
if test "x$with_libva" != "xno"; then
AC_CHECK_LIB(va, vaInitialize,
VA_LIBS="-lva -lva-x11"
[AC_DEFINE(CONFIG_LIBVA) CONFIG_LIBVA="true"], ,)
fi
AC_SUBST(VA_LIBS)
AM_CONDITIONAL(CONFIG_LIBVA, test ! -z "$VA_LIBS")
# See if we are to build x11vnc: # See if we are to build x11vnc:
AH_TEMPLATE(HAVE_SYSTEM_LIBVNCSERVER, [Use the system libvncserver build environment for x11vnc.]) AH_TEMPLATE(HAVE_SYSTEM_LIBVNCSERVER, [Use the system libvncserver build environment for x11vnc.])
AC_ARG_WITH(system-libvncserver, AC_ARG_WITH(system-libvncserver,

@ -14,7 +14,7 @@ endif
libvncclient_la_SOURCES=cursor.c listen.c rfbproto.c sockets.c vncviewer.c ../common/minilzo.c $(TLSSRCS) libvncclient_la_SOURCES=cursor.c listen.c rfbproto.c sockets.c vncviewer.c ../common/minilzo.c $(TLSSRCS)
libvncclient_la_LIBADD=$(TLSLIBS) libvncclient_la_LIBADD=$(TLSLIBS) $(VA_LIBS)
noinst_HEADERS=../common/lzodefs.h ../common/lzoconf.h ../common/minilzo.h tls.h noinst_HEADERS=../common/lzodefs.h ../common/lzoconf.h ../common/minilzo.h tls.h

@ -0,0 +1,644 @@
/*
* Copyright (C) 2012 Intel Corporation. All Rights Reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#ifdef LIBVNCSERVER_CONFIG_LIBVA
#include <X11/Xlib.h>
#include <va/va_x11.h>
enum _slice_types {
SLICE_TYPE_P = 0, /* Predicted */
SLICE_TYPE_B = 1, /* Bi-predicted */
SLICE_TYPE_I = 2, /* Intra coded */
};
#define SURFACE_NUM 7
VADisplay va_dpy = NULL;
VAConfigID va_config_id;
VASurfaceID va_surface_id[SURFACE_NUM];
VAContextID va_context_id = 0;
VABufferID va_pic_param_buf_id[SURFACE_NUM];
VABufferID va_mat_param_buf_id[SURFACE_NUM];
VABufferID va_sp_param_buf_id[SURFACE_NUM];
VABufferID va_d_param_buf_id[SURFACE_NUM];
static int cur_height = 0;
static int cur_width = 0;
static unsigned int num_frames = 0;
static int sid = 0;
static unsigned int frame_id = 0;
static int field_order_count = 0;
static VASurfaceID curr_surface = VA_INVALID_ID;
VAStatus gva_status;
VASurfaceStatus gsurface_status;
#define CHECK_SURF(X) \
gva_status = vaQuerySurfaceStatus(va_dpy, X, &gsurface_status); \
if (gsurface_status != 4) printf("ss: %d\n", gsurface_status);
#ifdef _DEBUG
#define DebugLog(A) rfbClientLog A
#else
#define DebugLog(A)
#endif
#define CHECK_VASTATUS(va_status,func) \
if (va_status != VA_STATUS_SUCCESS) { \
/*fprintf(stderr,"%s:%s (%d) failed,exit\n", __func__, func, __LINE__);*/ \
rfbClientErr("%s:%s:%d failed (0x%x),exit\n", __func__, func, __LINE__, va_status); \
exit(1); \
} else { \
/*fprintf(stderr,">> SUCCESS for: %s:%s (%d)\n", __func__, func, __LINE__);*/ \
DebugLog(("%s:%s:%d success\n", __func__, func, __LINE__)); \
}
/*
* Forward declarations
*/
static void h264_decode_frame(int f_width, int f_height, char *framedata, int framesize, int slice_type);
static void SetVAPictureParameterBufferH264(VAPictureParameterBufferH264 *p, int width, int height);
static void SetVASliceParameterBufferH264(VASliceParameterBufferH264 *p);
static void SetVASliceParameterBufferH264_Intra(VASliceParameterBufferH264 *p, int first);
static void put_updated_rectangle(rfbClient *client, int x, int y, int width, int height, int f_width, int f_height, int first_for_frame);
static void nv12_to_rgba(const VAImage vaImage, rfbClient *client, int ch_x, int ch_y, int ch_w, int ch_h);
/* FIXME: get this value from the server instead of hardcoding 32bit pixels */
#define BPP (4 * 8)
static const char *string_of_FOURCC(uint32_t fourcc)
{
	/* Render a FOURCC code as a printable 4-character string.
	 * Two rotating static buffers are used so the results of the two
	 * most recent calls stay valid at the same time (e.g. when both
	 * appear in a single log statement). Not thread-safe.
	 */
	static int buf;
	static char str[2][5];
	int i;

	buf ^= 1;
	for (i = 0; i < 4; ++i)
		str[buf][i] = (char)(fourcc >> (8 * i));
	str[buf][4] = '\0';
	return str[buf];
}
static inline const char *string_of_VAImageFormat(VAImageFormat *imgfmt)
{
	/* A VA image format is identified by its FOURCC code; reuse the
	 * generic FOURCC formatter for it. */
	const uint32_t fourcc = imgfmt->fourcc;
	return string_of_FOURCC(fourcc);
}
static rfbBool
HandleH264 (rfbClient* client, int rx, int ry, int rw, int rh)
{
	/* Handle one framebuffer-update rectangle in H.264 encoding.
	 * Reads the rfbH264Header, then (when hdr.nBytes > 0) the encoded
	 * frame data; decodes it and puts the updated rectangle on screen.
	 * Returns FALSE on protocol/allocation error, TRUE otherwise.
	 */
	rfbH264Header hdr;
	char *framedata;

	DebugLog(("Framebuffer update with H264 (x: %d, y: %d, w: %d, h: %d)\n", rx, ry, rw, rh));

	/* First, read the frame size and allocate buffer to store the data */
	if (!ReadFromRFBServer(client, (char *)&hdr, sz_rfbH264Header))
		return FALSE;

	hdr.slice_type = rfbClientSwap32IfLE(hdr.slice_type);
	hdr.nBytes = rfbClientSwap32IfLE(hdr.nBytes);
	hdr.width = rfbClientSwap32IfLE(hdr.width);
	hdr.height = rfbClientSwap32IfLE(hdr.height);

	framedata = (char*) malloc(hdr.nBytes);
	if (framedata == NULL && hdr.nBytes > 0) {
		rfbClientErr("Failed to allocate %d bytes for H.264 frame data\n", hdr.nBytes);
		return FALSE;
	}

	/* Obtain frame data from the server */
	DebugLog(("Reading %d bytes of frame data (type: %d)\n", hdr.nBytes, hdr.slice_type));
	if (!ReadFromRFBServer(client, framedata, hdr.nBytes)) {
		free(framedata); /* was leaked on this error path before */
		return FALSE;
	}

	/* First make sure we have a large enough raw buffer to hold the
	 * decompressed data. In practice, with a fixed BPP, fixed frame
	 * buffer size and the first update containing the entire frame
	 * buffer, this buffer allocation should only happen once, on the
	 * first update.
	 */
	if ( client->raw_buffer_size < (( rw * rh ) * ( BPP / 8 ))) {
		if ( client->raw_buffer != NULL ) {
			free( client->raw_buffer );
		}

		client->raw_buffer_size = (( rw * rh ) * ( BPP / 8 ));
		client->raw_buffer = (char*) malloc( client->raw_buffer_size );
		if (client->raw_buffer == NULL) {
			rfbClientErr("Failed to allocate raw buffer of %d bytes\n", client->raw_buffer_size);
			client->raw_buffer_size = 0;
			free(framedata);
			return FALSE;
		}
		rfbClientLog("Allocated raw buffer of %d bytes (%dx%dx%d BPP)\n", client->raw_buffer_size, rw, rh, BPP);
	}

	/* Decode frame if frame data was sent. Server only sends frame data for the first
	 * framebuffer update message for a particular frame buffer contents.
	 * If more than 1 rectangle is updated, the messages after the first one (with
	 * the H.264 frame) have nBytes == 0.
	 */
	if (hdr.nBytes > 0) {
		DebugLog(("  decoding %d bytes of H.264 data\n", hdr.nBytes));
		h264_decode_frame(hdr.width, hdr.height, framedata, hdr.nBytes, hdr.slice_type);
	}

	DebugLog(("  updating rectangle (%d, %d)-(%d, %d)\n", rx, ry, rw, rh));
	put_updated_rectangle(client, rx, ry, rw, rh, hdr.width, hdr.height, hdr.nBytes != 0);

	free(framedata);

	return TRUE;
}
static void h264_cleanup_decoder()
{
VAStatus va_status;
rfbClientLog("%s()\n", __FUNCTION__);
if (va_surface_id[0] != VA_INVALID_ID) {
va_status = vaDestroySurfaces(va_dpy, &va_surface_id[0], SURFACE_NUM);
CHECK_VASTATUS(va_status, "vaDestroySurfaces");
}
if (va_context_id) {
va_status = vaDestroyContext(va_dpy, va_context_id);
CHECK_VASTATUS(va_status, "vaDestroyContext");
va_context_id = 0;
}
num_frames = 0;
sid = 0;
frame_id = 0;
field_order_count = 0;
}
static void h264_init_decoder(int width, int height)
{
VAStatus va_status;
if (va_context_id) {
rfbClientLog("%s: va_dpy already initialized\n", __FUNCTION__);
}
if (va_dpy != NULL) {
rfbClientLog("%s: Re-initializing H.264 decoder\n", __FUNCTION__);
}
else {
rfbClientLog("%s: initializing H.264 decoder\n", __FUNCTION__);
/* Attach VA display to local X display */
Display *win_display = (Display *)XOpenDisplay(":0.0");
if (win_display == NULL) {
rfbClientErr("Can't connect to local display\n");
exit(-1);
}
int major_ver, minor_ver;
va_dpy = vaGetDisplay(win_display);
va_status = vaInitialize(va_dpy, &major_ver, &minor_ver);
CHECK_VASTATUS(va_status, "vaInitialize");
rfbClientLog("%s: libva version %d.%d found\n", __FUNCTION__, major_ver, minor_ver);
}
/* Check for VLD entrypoint */
int num_entrypoints;
VAEntrypoint entrypoints[5];
int vld_entrypoint_found = 0;
/* Change VAProfileH264High if needed */
VAProfile profile = VAProfileH264High;
va_status = vaQueryConfigEntrypoints(va_dpy, profile, entrypoints, &num_entrypoints);
CHECK_VASTATUS(va_status, "vaQueryConfigEntrypoints");
int i;
for (i = 0; i < num_entrypoints; ++i) {
if (entrypoints[i] == VAEntrypointVLD) {
vld_entrypoint_found = 1;
break;
}
}
if (vld_entrypoint_found == 0) {
rfbClientErr("VLD entrypoint not found\n");
exit(1);
}
/* Create configuration for the decode pipeline */
VAConfigAttrib attrib;
attrib.type = VAConfigAttribRTFormat;
va_status = vaCreateConfig(va_dpy, profile, VAEntrypointVLD, &attrib, 1, &va_config_id);
CHECK_VASTATUS(va_status, "vaCreateConfig");
/* Create VA surfaces */
for (i = 0; i < SURFACE_NUM; ++i) {
va_surface_id[i] = VA_INVALID_ID;
va_pic_param_buf_id[i] = VA_INVALID_ID;
va_mat_param_buf_id[i] = VA_INVALID_ID;
va_sp_param_buf_id[i] = VA_INVALID_ID;
va_d_param_buf_id[i] = VA_INVALID_ID;
}
va_status = vaCreateSurfaces(va_dpy, width, height, VA_RT_FORMAT_YUV420, SURFACE_NUM, &va_surface_id[0]);
CHECK_VASTATUS(va_status, "vaCreateSurfaces");
for (i = 0; i < SURFACE_NUM; ++i) {
DebugLog(("%s: va_surface_id[%d] = %p\n", __FUNCTION__, i, va_surface_id[i]));
}
/* Create VA context */
va_status = vaCreateContext(va_dpy, va_config_id, width, height, 0/*VA_PROGRESSIVE*/, &va_surface_id[0], SURFACE_NUM, &va_context_id);
CHECK_VASTATUS(va_status, "vaCreateContext");
DebugLog(("%s: VA context created (id: %d)\n", __FUNCTION__, va_context_id));
/* Instantiate decode pipeline */
va_status = vaBeginPicture(va_dpy, va_context_id, va_surface_id[0]);
CHECK_VASTATUS(va_status, "vaBeginPicture");
rfbClientLog("%s: H.264 decoder initialized\n", __FUNCTION__);
}
static void h264_decode_frame(int f_width, int f_height, char *framedata, int framesize, int slice_type)
{
	/* Submit one H.264 frame (I or P slice) to the VA-API pipeline and
	 * wait for the decode to finish. The decoded surface is published via
	 * the global curr_surface for put_updated_rectangle() to display.
	 */
	VAStatus va_status;

	/* Fixed: this DebugLog used the undefined names 'width'/'height'
	 * (compile error when _DEBUG is defined); the parameters are
	 * f_width/f_height. */
	DebugLog(("%s: called for frame of %d bytes (%dx%d) slice_type=%d\n", __FUNCTION__, framesize, f_width, f_height, slice_type));

	/* Initialize decode pipeline if necessary */
	if ( (f_width > cur_width) || (f_height > cur_height) ) {
		if (va_dpy != NULL)
			h264_cleanup_decoder();
		cur_width = f_width;
		cur_height = f_height;

		h264_init_decoder(f_width, f_height);
		rfbClientLog("%s: decoder initialized\n", __FUNCTION__);
	}

	/* Decode frame */
	static VAPictureH264 va_picture_h264, va_old_picture_h264;

	/* The server should always send an I-frame when a new client connects
	 * or when the resolution of the framebuffer changes, but we check
	 * just in case.
	 */
	if ( (slice_type != SLICE_TYPE_I) && (num_frames == 0) ) {
		rfbClientLog("First frame is not an I frame !!! Skipping!!!\n");
		return;
	}

	DebugLog(("%s: frame_id=%d va_surface_id[%d]=0x%x field_order_count=%d\n", __FUNCTION__, frame_id, sid, va_surface_id[sid], field_order_count));

	va_picture_h264.picture_id = va_surface_id[sid];
	va_picture_h264.frame_idx = frame_id;
	va_picture_h264.flags = 0;
	va_picture_h264.BottomFieldOrderCnt = field_order_count;
	va_picture_h264.TopFieldOrderCnt = field_order_count;

	/* Set up picture parameter buffer (created once per surface, reused) */
	if (va_pic_param_buf_id[sid] == VA_INVALID_ID) {
		va_status = vaCreateBuffer(va_dpy, va_context_id, VAPictureParameterBufferType, sizeof(VAPictureParameterBufferH264), 1, NULL, &va_pic_param_buf_id[sid]);
		CHECK_VASTATUS(va_status, "vaCreateBuffer(PicParam)");
	}
	CHECK_SURF(va_surface_id[sid]);

	VAPictureParameterBufferH264 *pic_param_buf = NULL;
	va_status = vaMapBuffer(va_dpy, va_pic_param_buf_id[sid], (void **)&pic_param_buf);
	CHECK_VASTATUS(va_status, "vaMapBuffer(PicParam)");

	SetVAPictureParameterBufferH264(pic_param_buf, f_width, f_height);
	memcpy(&pic_param_buf->CurrPic, &va_picture_h264, sizeof(VAPictureH264));
	if (slice_type == SLICE_TYPE_P) {
		memcpy(&pic_param_buf->ReferenceFrames[0], &va_old_picture_h264, sizeof(VAPictureH264));
		pic_param_buf->ReferenceFrames[0].flags = 0;
	}
	else if (slice_type != SLICE_TYPE_I) {
		/* Fixed: the format string expected an int argument that was
		 * never passed (undefined behavior); also unmap the buffer
		 * before bailing out so it is not left mapped. */
		rfbClientLog("Frame type %d not supported!!!\n", slice_type);
		vaUnmapBuffer(va_dpy, va_pic_param_buf_id[sid]);
		return;
	}
	pic_param_buf->frame_num = frame_id;

	va_status = vaUnmapBuffer(va_dpy, va_pic_param_buf_id[sid]);
	CHECK_VASTATUS(va_status, "vaUnmapBuffer(PicParam)");

	/* Set up IQ matrix buffer */
	if (va_mat_param_buf_id[sid] == VA_INVALID_ID) {
		va_status = vaCreateBuffer(va_dpy, va_context_id, VAIQMatrixBufferType, sizeof(VAIQMatrixBufferH264), 1, NULL, &va_mat_param_buf_id[sid]);
		CHECK_VASTATUS(va_status, "vaCreateBuffer(IQMatrix)");
	}
	CHECK_SURF(va_surface_id[sid]);

	VAIQMatrixBufferH264 *iq_matrix_buf = NULL;
	va_status = vaMapBuffer(va_dpy, va_mat_param_buf_id[sid], (void **)&iq_matrix_buf);
	CHECK_VASTATUS(va_status, "vaMapBuffer(IQMatrix)");

	/* Flat (all-16) 4x4 scaling lists and zeroed 8x8 lists */
	static const unsigned char m_MatrixBufferH264[]= {
		/* ScalingList4x4[6][16] */
		0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
		0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
		0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
		0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
		0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
		0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
		/* ScalingList8x8[2][64] */
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
	};

	/* Use sizeof instead of the magic constant 224 (= 6*16 + 2*64) */
	memcpy(iq_matrix_buf, m_MatrixBufferH264, sizeof(m_MatrixBufferH264));
	va_status = vaUnmapBuffer(va_dpy, va_mat_param_buf_id[sid]);
	CHECK_VASTATUS(va_status, "vaUnmapBuffer(IQMatrix)");

	VABufferID buffer_ids[2];
	buffer_ids[0] = va_pic_param_buf_id[sid];
	buffer_ids[1] = va_mat_param_buf_id[sid];

	CHECK_SURF(va_surface_id[sid]);
	va_status = vaRenderPicture(va_dpy, va_context_id, buffer_ids, 2);
	CHECK_VASTATUS(va_status, "vaRenderPicture");

	/* Set up slice parameter buffer */
	if (va_sp_param_buf_id[sid] == VA_INVALID_ID) {
		va_status = vaCreateBuffer(va_dpy, va_context_id, VASliceParameterBufferType, sizeof(VASliceParameterBufferH264), 1, NULL, &va_sp_param_buf_id[sid]);
		CHECK_VASTATUS(va_status, "vaCreateBuffer(SliceParam)");
	}
	CHECK_SURF(va_surface_id[sid]);

	VASliceParameterBufferH264 *slice_param_buf = NULL;
	va_status = vaMapBuffer(va_dpy, va_sp_param_buf_id[sid], (void **)&slice_param_buf);
	CHECK_VASTATUS(va_status, "vaMapBuffer(SliceParam)");

	static int t2_first = 1;
	if (slice_type == SLICE_TYPE_I) {
		SetVASliceParameterBufferH264_Intra(slice_param_buf, t2_first);
		t2_first = 0;
	} else {
		SetVASliceParameterBufferH264(slice_param_buf);
		memcpy(&slice_param_buf->RefPicList0[0], &va_old_picture_h264, sizeof(VAPictureH264));
		slice_param_buf->RefPicList0[0].flags = 0;
	}
	slice_param_buf->slice_data_bit_offset = 0;
	slice_param_buf->slice_data_size = framesize;
	va_status = vaUnmapBuffer(va_dpy, va_sp_param_buf_id[sid]);
	CHECK_VASTATUS(va_status, "vaUnmapBuffer(SliceParam)");
	CHECK_SURF(va_surface_id[sid]);

	/* Set up slice data buffer and copy H.264 encoded data */
	if (va_d_param_buf_id[sid] == VA_INVALID_ID) {
		/* TODO use estimation matching framebuffer dimensions instead of this large value */
		va_status = vaCreateBuffer(va_dpy, va_context_id, VASliceDataBufferType, 4177920, 1, NULL, &va_d_param_buf_id[sid]); /* 1080p size */
		CHECK_VASTATUS(va_status, "vaCreateBuffer(SliceData)");
	}

	char *slice_data_buf;
	va_status = vaMapBuffer(va_dpy, va_d_param_buf_id[sid], (void **)&slice_data_buf);
	CHECK_VASTATUS(va_status, "vaMapBuffer(SliceData)");
	memcpy(slice_data_buf, framedata, framesize);
	CHECK_SURF(va_surface_id[sid]);
	va_status = vaUnmapBuffer(va_dpy, va_d_param_buf_id[sid]);
	CHECK_VASTATUS(va_status, "vaUnmapBuffer(SliceData)");

	buffer_ids[0] = va_sp_param_buf_id[sid];
	buffer_ids[1] = va_d_param_buf_id[sid];

	CHECK_SURF(va_surface_id[sid]);
	va_status = vaRenderPicture(va_dpy, va_context_id, buffer_ids, 2);
	CHECK_VASTATUS(va_status, "vaRenderPicture");

	va_status = vaEndPicture(va_dpy, va_context_id);
	CHECK_VASTATUS(va_status, "vaEndPicture");

	/* Prepare next one... */
	int sid_new = (sid + 1) % SURFACE_NUM;
	DebugLog(("%s: new Surface ID = %d\n", __FUNCTION__, sid_new));
	va_status = vaBeginPicture(va_dpy, va_context_id, va_surface_id[sid_new]);
	CHECK_VASTATUS(va_status, "vaBeginPicture");

	/* Get decoded data */
	va_status = vaSyncSurface(va_dpy, va_surface_id[sid]);
	CHECK_VASTATUS(va_status, "vaSyncSurface");
	CHECK_SURF(va_surface_id[sid]);

	curr_surface = va_surface_id[sid];

	sid = sid_new;
	field_order_count += 2;
	++frame_id;
	if (frame_id > 15) {
		frame_id = 0;
	}

	++num_frames;

	/* Remember this picture as the reference for the next P frame */
	memcpy(&va_old_picture_h264, &va_picture_h264, sizeof(VAPictureH264));
}
static void put_updated_rectangle(rfbClient *client, int x, int y, int width, int height, int f_width, int f_height, int first_for_frame)
{
	/* Display the last decoded surface: either directly into the client
	 * application's X window via vaPutSurface() (fast path, when
	 * client->outputWindow is set), or by converting the changed region
	 * NV12 -> RGBA into client->frameBuffer (fallback).
	 */
	if (curr_surface == VA_INVALID_ID) {
		rfbClientErr("%s: called, but current surface is invalid\n", __FUNCTION__);
		return;
	}

	VAStatus va_status;

	if (client->outputWindow) {
		/* use efficient vaPutSurface() method of putting the framebuffer on the screen */
		if (first_for_frame) {
			/* vaPutSurface() clears window contents outside the given
			 * destination rectangle => always update full screen. */
			va_status = vaPutSurface(va_dpy, curr_surface, client->outputWindow, 0, 0, f_width, f_height, 0, 0, f_width, f_height, NULL, 0, VA_FRAME_PICTURE);
			CHECK_VASTATUS(va_status, "vaPutSurface");
		}
	}
	else if (client->frameBuffer) {
		/* ... or copy the changed framebuffer region manually as a fallback */
		VAImage decoded_image;
		decoded_image.image_id = VA_INVALID_ID;
		decoded_image.buf = VA_INVALID_ID;
		va_status = vaDeriveImage(va_dpy, curr_surface, &decoded_image);
		CHECK_VASTATUS(va_status, "vaDeriveImage");
		if ((decoded_image.image_id == VA_INVALID_ID) || (decoded_image.buf == VA_INVALID_ID)) {
			rfbClientErr("%s: vaDeriveImage() returned success but VA image is invalid (id: %d, buf: %d)\n", __FUNCTION__, decoded_image.image_id, decoded_image.buf);
			/* Don't map/convert or destroy an invalid image; the old
			 * code fell through and used the invalid handles. */
			if (decoded_image.image_id != VA_INVALID_ID)
				vaDestroyImage(va_dpy, decoded_image.image_id);
			return;
		}

		nv12_to_rgba(decoded_image, client, x, y, width, height);

		va_status = vaDestroyImage(va_dpy, decoded_image.image_id);
		CHECK_VASTATUS(va_status, "vaDestroyImage");
	}
}
static void SetVAPictureParameterBufferH264(VAPictureParameterBufferH264 *p, int width, int height)
{
	/* Populate the picture parameter buffer with the fixed settings this
	 * decoder uses: frame dimensions in macroblocks, a single reference
	 * frame, and constant sequence/picture flag words.
	 * NOTE(review): 145 and 0x501 are pre-packed seq_fields/pic_fields
	 * bit patterns matching the server-side encoder -- the individual
	 * flag bits are undocumented here; confirm against the encoder.
	 */
	int idx;
	unsigned int mbs_w = (width + 15) / 16;   /* round up to whole MBs */
	unsigned int mbs_h = (height + 15) / 16;

	memset(p, 0, sizeof(*p));
	p->picture_width_in_mbs_minus1 = mbs_w - 1;
	p->picture_height_in_mbs_minus1 = mbs_h - 1;
	p->num_ref_frames = 1;
	p->seq_fields.value = 145;
	p->pic_fields.value = 0x501;
	/* Mark every reference slot as unused */
	for (idx = 0; idx < 16; ++idx) {
		p->ReferenceFrames[idx].flags = VA_PICTURE_H264_INVALID;
		p->ReferenceFrames[idx].picture_id = 0xffffffff;
	}
}
static void SetVASliceParameterBufferH264(VASliceParameterBufferH264 *p)
{
	/* Fill the slice parameter buffer with the fixed values used for
	 * predicted (P) slices; reference lists are cleared and RefPicList0[0]
	 * is filled in afterwards by the caller.
	 */
	int idx;

	memset(p, 0, sizeof(*p));
	p->slice_data_size = 0;
	p->slice_data_bit_offset = 64;
	p->slice_alpha_c0_offset_div2 = 2;
	p->slice_beta_offset_div2 = 2;
	/* Fixed weighted-prediction settings matching the server encoder */
	p->chroma_weight_l0_flag = 1;
	p->chroma_weight_l0[0][0]=1;
	p->chroma_offset_l0[0][0]=0;
	p->chroma_weight_l0[0][1]=1;
	p->chroma_offset_l0[0][1]=0;
	p->luma_weight_l1_flag = 1;
	p->chroma_weight_l1_flag = 1;
	p->luma_weight_l0[0]=0x01;
	/* Invalidate both reference picture lists */
	for (idx = 0; idx < 32; ++idx) {
		p->RefPicList0[idx].flags = VA_PICTURE_H264_INVALID;
		p->RefPicList1[idx].flags = VA_PICTURE_H264_INVALID;
	}
	p->RefPicList1[0].picture_id = 0xffffffff;
}
static void SetVASliceParameterBufferH264_Intra(VASliceParameterBufferH264 *p, int first)
{
	/* Fill the slice parameter buffer for an intra (I) slice. The very
	 * first slice of the stream uses slightly different weight flags than
	 * subsequent I slices (selected via 'first').
	 */
	int idx;

	memset(p, 0, sizeof(*p));
	p->slice_data_size = 0;
	p->slice_data_bit_offset = 64;
	p->slice_alpha_c0_offset_div2 = 2;
	p->slice_beta_offset_div2 = 2;
	p->slice_type = 2;   /* SLICE_TYPE_I */
	if (first) {
		p->luma_weight_l0_flag = 1;
		p->chroma_weight_l0_flag = 1;
		p->luma_weight_l1_flag = 1;
		p->chroma_weight_l1_flag = 1;
	} else {
		p->chroma_weight_l0_flag = 1;
		p->chroma_weight_l0[0][0]=1;
		p->chroma_offset_l0[0][0]=0;
		p->chroma_weight_l0[0][1]=1;
		p->chroma_offset_l0[0][1]=0;
		p->luma_weight_l1_flag = 1;
		p->chroma_weight_l1_flag = 1;
		p->luma_weight_l0[0]=0x01;
	}
	/* Invalidate both reference picture lists */
	for (idx = 0; idx < 32; ++idx) {
		p->RefPicList0[idx].flags = VA_PICTURE_H264_INVALID;
		p->RefPicList1[idx].flags = VA_PICTURE_H264_INVALID;
	}
	p->RefPicList1[0].picture_id = 0xffffffff;
	p->RefPicList0[0].picture_id = 0xffffffff;
}
/* Convert the changed region of the decoded NV12 VA image to 32-bit RGBA
 * in client->frameBuffer, using the shift values from client->format.
 * NV12 layout: full-resolution Y plane at offsets[0], followed by an
 * interleaved half-resolution UV plane at offsets[1]. The conversion works
 * on 2x2 pixel quads since one UV pair covers four Y samples.
 * NOTE(review): assumes client->frameBuffer is 32 bits per pixel (see the
 * BPP define above) and that ch_x/ch_y/ch_w/ch_h lie within the image
 * bounds even after the evenness adjustment -- confirm with callers.
 */
static void nv12_to_rgba(const VAImage vaImage, rfbClient *client, int ch_x, int ch_y, int ch_w, int ch_h)
{
	DebugLog(("%s: converting region (%d, %d)-(%d, %d) from NV12->RGBA\n", __FUNCTION__, ch_x, ch_y, ch_w, ch_h));

	VAStatus va_status;
	uint8_t *nv12_buf;
	va_status = vaMapBuffer(va_dpy, vaImage.buf, (void **)&nv12_buf);
	CHECK_VASTATUS(va_status, "vaMapBuffer(DecodedData)");

	/* adjust x, y, width, height of the affected area so
	 * x, y, width and height are always even.
	 */
	if (ch_x % 2) { --ch_x; ++ch_w; }
	if (ch_y % 2) { --ch_y; ++ch_h; }
	if ((ch_x + ch_w) % 2) { ++ch_w; }
	if ((ch_y + ch_h) % 2) { ++ch_h; }

	/* point nv12_buf and dst to upper left corner of changed area */
	uint8_t *nv12_y  = &nv12_buf[vaImage.offsets[0] + vaImage.pitches[0] * ch_y + ch_x];
	/* UV plane is subsampled vertically by 2, hence ch_y / 2 */
	uint8_t *nv12_uv = &nv12_buf[vaImage.offsets[1] + vaImage.pitches[1] * (ch_y / 2) + ch_x];
	uint32_t *dst = &((uint32_t*)client->frameBuffer)[client->width * ch_y + ch_x];

	/* TODO: optimize R, G, B calculation. Possible ways to do this:
	 * - use lookup tables
	 * - convert from floating point to integer arithmetic
	 * - use MMX/SSE to vectorize calculations
	 * - use GPU (VA VPP, shader...)
	 */
	int src_x, src_y;
	for (src_y = 0; src_y < ch_h; src_y += 2) {
		for (src_x = 0; src_x < ch_w; src_x += 2) {
			/* One UV pair shared by a 2x2 block of Y samples */
			uint8_t nv_u = nv12_uv[src_x];
			uint8_t nv_v = nv12_uv[src_x + 1];
			uint8_t nv_y[4] = { nv12_y[                     src_x], nv12_y[                     src_x + 1],
			                    nv12_y[vaImage.pitches[0] + src_x], nv12_y[vaImage.pitches[0] + src_x + 1] };

			int i;
			for (i = 0; i < 4; ++i) {
				/* BT.601 YUV -> RGB conversion (video range) */
				double R = 1.164 * (nv_y[i] - 16)                          + 1.596 * (nv_v - 128);
				double G = 1.164 * (nv_y[i] - 16) - 0.391 * (nv_u - 128)   - 0.813 * (nv_v - 128);
				double B = 1.164 * (nv_y[i] - 16) + 2.018 * (nv_u - 128);

				/* clamp R, G, B values. For some Y, U, V combinations,
				 * the results of the above calculations fall outside of
				 * the range 0-255.
				 */
				if (R < 0.0) R = 0.0;
				if (G < 0.0) G = 0.0;
				if (B < 0.0) B = 0.0;
				if (R > 255.0) R = 255.0;
				if (G > 255.0) G = 255.0;
				if (B > 255.0) B = 255.0;
				/* i selects within the 2x2 quad: i/2 is the row, i%2 the column */
				dst[client->width * (i / 2) + src_x + (i % 2)] = 0
					| ((unsigned int)(R + 0.5) << client->format.redShift)
					| ((unsigned int)(G + 0.5) << client->format.greenShift)
					| ((unsigned int)(B + 0.5) << client->format.blueShift);
			}
		}
		/* advance two source rows (one UV row) and two destination rows */
		nv12_y  += 2 * vaImage.pitches[0];
		nv12_uv += vaImage.pitches[1];
		dst += 2 * client->width;
	}

	CHECK_SURF(va_surface_id[sid]);
	va_status = vaUnmapBuffer(va_dpy, vaImage.buf);
	CHECK_VASTATUS(va_status, "vaUnmapBuffer(DecodedData)");
}
#endif /* LIBVNCSERVER_CONFIG_LIBVA */

@ -158,6 +158,10 @@ static void FillRectangle(rfbClient* client, int x, int y, int w, int h, uint32_
static void CopyRectangle(rfbClient* client, uint8_t* buffer, int x, int y, int w, int h) { static void CopyRectangle(rfbClient* client, uint8_t* buffer, int x, int y, int w, int h) {
int j; int j;
if (client->frameBuffer == NULL) {
return;
}
#define COPY_RECT(BPP) \ #define COPY_RECT(BPP) \
{ \ { \
int rs = w * BPP / 8, rs2 = client->width * BPP / 8; \ int rs = w * BPP / 8, rs2 = client->width * BPP / 8; \
@ -260,6 +264,9 @@ static rfbBool HandleZRLE24Up(rfbClient* client, int rx, int ry, int rw, int rh)
static rfbBool HandleZRLE24Down(rfbClient* client, int rx, int ry, int rw, int rh); static rfbBool HandleZRLE24Down(rfbClient* client, int rx, int ry, int rw, int rh);
static rfbBool HandleZRLE32(rfbClient* client, int rx, int ry, int rw, int rh); static rfbBool HandleZRLE32(rfbClient* client, int rx, int ry, int rw, int rh);
#endif #endif
#ifdef LIBVNCSERVER_CONFIG_LIBVA
static rfbBool HandleH264 (rfbClient* client, int rx, int ry, int rw, int rh);
#endif
/* /*
* Server Capability Functions * Server Capability Functions
@ -1344,6 +1351,10 @@ SetFormatAndEncodings(rfbClient* client)
encs[se->nEncodings++] = rfbClientSwap32IfLE(rfbEncodingCoRRE); encs[se->nEncodings++] = rfbClientSwap32IfLE(rfbEncodingCoRRE);
} else if (strncasecmp(encStr,"rre",encStrLen) == 0) { } else if (strncasecmp(encStr,"rre",encStrLen) == 0) {
encs[se->nEncodings++] = rfbClientSwap32IfLE(rfbEncodingRRE); encs[se->nEncodings++] = rfbClientSwap32IfLE(rfbEncodingRRE);
#ifdef LIBVNCSERVER_CONFIG_LIBVA
} else if (strncasecmp(encStr,"h264",encStrLen) == 0) {
encs[se->nEncodings++] = rfbClientSwap32IfLE(rfbEncodingH264);
#endif
} else { } else {
rfbClientLog("Unknown encoding '%.*s'\n",encStrLen,encStr); rfbClientLog("Unknown encoding '%.*s'\n",encStrLen,encStr);
} }
@ -1412,6 +1423,10 @@ SetFormatAndEncodings(rfbClient* client)
encs[se->nEncodings++] = rfbClientSwap32IfLE(client->appData.qualityLevel + encs[se->nEncodings++] = rfbClientSwap32IfLE(client->appData.qualityLevel +
rfbEncodingQualityLevel0); rfbEncodingQualityLevel0);
} }
#ifdef LIBVNCSERVER_CONFIG_LIBVA
encs[se->nEncodings++] = rfbClientSwap32IfLE(rfbEncodingH264);
rfbClientLog("h264 encoding added\n");
#endif
} }
@ -2127,6 +2142,14 @@ HandleRFBServerMessage(rfbClient* client)
break; break;
} }
#endif
#ifdef LIBVNCSERVER_CONFIG_LIBVA
case rfbEncodingH264:
{
if (!HandleH264(client, rect.r.x, rect.r.y, rect.r.w, rect.r.h))
return FALSE;
break;
}
#endif #endif
default: default:
@ -2361,6 +2384,7 @@ HandleRFBServerMessage(rfbClient* client)
#define UNCOMP -8 #define UNCOMP -8
#include "zrle.c" #include "zrle.c"
#undef BPP #undef BPP
#include "h264.c"
/* /*

@ -91,7 +91,11 @@ static rfbBool MallocFrameBuffer(rfbClient* client) {
static void initAppData(AppData* data) { static void initAppData(AppData* data) {
data->shareDesktop=TRUE; data->shareDesktop=TRUE;
data->viewOnly=FALSE; data->viewOnly=FALSE;
#ifdef LIBVNCSERVER_CONFIG_LIBVA
data->encodingsString="h264 tight zrle ultra copyrect hextile zlib corre rre raw";
#else
data->encodingsString="tight zrle ultra copyrect hextile zlib corre rre raw"; data->encodingsString="tight zrle ultra copyrect hextile zlib corre rre raw";
#endif
data->useBGR233=FALSE; data->useBGR233=FALSE;
data->nColours=0; data->nColours=0;
data->forceOwnCmap=FALSE; data->forceOwnCmap=FALSE;
@ -129,6 +133,9 @@ rfbClient* rfbGetClient(int bitsPerSample,int samplesPerPixel,
/* default: use complete frame buffer */ /* default: use complete frame buffer */
client->updateRect.x = -1; client->updateRect.x = -1;
client->frameBuffer = NULL;
client->outputWindow = 0;
client->format.bitsPerPixel = bytesPerPixel*8; client->format.bitsPerPixel = bytesPerPixel*8;
client->format.depth = bitsPerSample*samplesPerPixel; client->format.depth = bitsPerSample*samplesPerPixel;
client->appData.requestedDepth=client->format.depth; client->appData.requestedDepth=client->format.depth;

@ -175,6 +175,7 @@ typedef void (*GotCopyRectProc)(struct _rfbClient* client, int src_x, int src_y,
typedef struct _rfbClient { typedef struct _rfbClient {
uint8_t* frameBuffer; uint8_t* frameBuffer;
unsigned long outputWindow; /* Output Window ID. When set, client application enables libvncclient to perform direct rendering in its window */
int width, height; int width, height;
int endianTest; int endianTest;

@ -514,6 +514,9 @@ typedef struct {
#define rfbEncodingSupportedEncodings 0xFFFE0002 #define rfbEncodingSupportedEncodings 0xFFFE0002
#define rfbEncodingServerIdentity 0xFFFE0003 #define rfbEncodingServerIdentity 0xFFFE0003
#ifdef LIBVNCSERVER_CONFIG_LIBVA
#define rfbEncodingH264 0x48323634
#endif
/***************************************************************************** /*****************************************************************************
* *
@ -868,6 +871,21 @@ typedef struct {
#endif #endif
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 * h264 - H.264 encoding. We have an rfbH264Header structure
 * giving the number of bytes that follow. The data that follows is
 * the H.264 encoded frame.
*/
/* Header preceding H.264 framebuffer update data. All fields are sent in
 * big-endian byte order and swapped by the client on receipt. */
typedef struct {
    uint32_t nBytes;       /* size of the H.264 frame data that follows; 0 when
                            * this rectangle reuses the previously sent frame */
    uint32_t slice_type;   /* 0 = P (predicted), 1 = B (bi-predicted), 2 = I (intra) */
    uint32_t width;        /* full frame width in pixels */
    uint32_t height;       /* full frame height in pixels */
} rfbH264Header;

#define sz_rfbH264Header 16
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* XCursor encoding. This is a special encoding used to transmit X-style * XCursor encoding. This is a special encoding used to transmit X-style
* cursor shapes from server to clients. Note that for this encoding, * cursor shapes from server to clients. Note that for this encoding,

Loading…
Cancel
Save