diff -Naur --exclude=.svn mythtv.ori/configure mythtv/configure --- mythtv.ori/configure 2009-01-23 16:38:27.000000000 +1100 +++ mythtv/configure 2009-02-10 14:01:34.000000000 +1100 @@ -170,6 +170,7 @@ echo " --disable-xvmcw disable XvMC Wrapper, use --xvmc-lib if autodetection fails" echo " --enable-xvmc-pro enable XvMC for the unichrome pro (NOT unichrome) chipset" echo " --enable-xvmc-opengl enable nVidia XvMC OpenGL texture method" + echo " --enable-vdpau enable NVidia VDPAU hardware acceleration." echo " --xvmc-lib=LIB XvMC library override (for crosscompiling)" #echo " --disable-xvmc-vld disable XvMC-VLD (VIA Epia MPEG accel.)" echo " --enable-opengl-video enable OpenGL based video display " @@ -878,6 +879,7 @@ termios_h threads winsock2_h + vdpau " MYTHTV_CONFIG_LIST=' @@ -929,6 +931,7 @@ darwin_da dvdv opengl + vdpau ' CMDLINE_SELECT=" @@ -942,6 +945,7 @@ optimizations shared static + vdpau " # code dependency declarations @@ -980,6 +984,8 @@ wmv1_decoder_deps="h263_decoder" wmv2_decoder_deps="h263_decoder" wmv3_decoder_deps="h263_decoder" +vc1_vdpau_decoder_deps="vdpau" +wmv3_vdpau_decoder_deps="vdpau" zmbv_decoder_deps="zlib" zmbv_encoder_deps="zlib" @@ -1054,6 +1060,7 @@ opengl_video_deps="opengl xv" opengl_vsync_deps="opengl" v4l_deps="backend" +vdpau_deps="x11 vdpau_vdpau_h" xrandr_deps="x11" xv_deps="x11" xvmc_deps="xv X11_extensions_XvMClib_h" @@ -1188,6 +1195,7 @@ x11="yes" x11_include_path="/usr/X11R6/include" xrandr="yes" +vdpau_deps="x11 vdpau_vdpau_h" xv="yes" xvmc="yes" xvmc_lib="" @@ -1210,6 +1218,7 @@ vhook="default" # build settings +add_cflags -D_ISOC99_SOURCE -D_POSIX_C_SOURCE=200112 SHFLAGS='-shared -Wl,-soname,$@' VHOOKSHFLAGS='$(SHFLAGS)' LDLATEFLAGS='-Wl,-rpath-link,\$(BUILD_ROOT)/libavcodec -Wl,-rpath-link,\$(BUILD_ROOT)/libavformat -Wl,-rpath-link,\$(BUILD_ROOT)/libavutil' @@ -2757,11 +2766,13 @@ disable xvmc_vld xvmc_pro elif enabled xvmc_opengl ; then disable xvmc_opengl - echo "Disabling XvMC-opengl. 
It is only available when linking against libXvMCNIVIDIA" + echo "Disabling XvMC-opengl. It is only available when linking against libXvMCNVIDIA" fi enabled xvmc_pro && enable xvmc_vld +check_header vdpau/vdpau.h +enabled vdpau && has_library libvdpau || disable vdpau # Can only do Mac accel on Mac platform enabled dvdv && test $targetos = darwin || disable dvdv @@ -3015,6 +3026,7 @@ echo echo "# Video Output Support" echo "x11 support ${x11-no}" +if enabled x11 ; then echo "xrandr support ${xrandr-no}" echo "xv support ${xv-no}" echo "XvMC support ${xvmc-no}" @@ -3024,6 +3036,8 @@ if test "$VENDOR_XVMC_LIBS" != "" ; then echo "XvMC libs $VENDOR_XVMC_LIBS" fi + echo "VDPAU support ${vdpau-no}" +fi echo "OpenGL video ${opengl_video-no}" if test x"$targetos" = x"darwin" ; then echo "Mac acceleration ${dvdv-no}" diff -Naur --exclude=.svn mythtv.ori/libs/libavcodec/allcodecs.c mythtv/libs/libavcodec/allcodecs.c --- mythtv.ori/libs/libavcodec/allcodecs.c 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libavcodec/allcodecs.c 2009-02-10 14:01:34.000000000 +1100 @@ -91,6 +91,9 @@ REGISTER_DECODER (H263I, h263i); REGISTER_ENCODER (H263P, h263p); REGISTER_DECODER (H264, h264); +#ifdef HAVE_VDPAU + REGISTER_DECODER (H264_VDPAU, h264_vdpau); +#endif REGISTER_ENCDEC (HUFFYUV, huffyuv); REGISTER_DECODER (IDCIN, idcin); REGISTER_DECODER (INDEO2, indeo2); @@ -105,6 +108,9 @@ REGISTER_DECODER (MJPEGB, mjpegb); REGISTER_DECODER (MMVIDEO, mmvideo); REGISTER_DECODER (MPEG_DVDV, mpeg_dvdv); +#ifdef HAVE_VDPAU + REGISTER_DECODER (MPEG_VDPAU, mpeg_vdpau); +#endif REGISTER_DECODER (MPEG_XVMC, mpeg_xvmc); REGISTER_DECODER (MPEG_XVMC_VLD, mpeg_xvmc_vld); REGISTER_ENCDEC (MPEG1VIDEO, mpeg1video); @@ -152,6 +158,9 @@ REGISTER_DECODER (ULTI, ulti); REGISTER_DECODER (VB, vb); REGISTER_DECODER (VC1, vc1); +#ifdef HAVE_VDPAU + REGISTER_DECODER (VC1_VDPAU, vc1_vdpau); +#endif REGISTER_DECODER (VCR1, vcr1); REGISTER_DECODER (VMDVIDEO, vmdvideo); REGISTER_DECODER (VMNC, vmnc); @@ -164,6 +173,9 
@@ REGISTER_ENCDEC (WMV1, wmv1); REGISTER_ENCDEC (WMV2, wmv2); REGISTER_DECODER (WMV3, wmv3); +#ifdef HAVE_VDPAU + REGISTER_DECODER (WMV3_VDPAU, wmv3_vdpau); +#endif REGISTER_DECODER (WNV1, wnv1); REGISTER_DECODER (XAN_WC3, xan_wc3); REGISTER_DECODER (XL, xl); diff -Naur --exclude=.svn mythtv.ori/libs/libavcodec/avcodec.h mythtv/libs/libavcodec/avcodec.h --- mythtv.ori/libs/libavcodec/avcodec.h 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libavcodec/avcodec.h 2009-02-10 14:01:34.000000000 +1100 @@ -171,6 +171,10 @@ CODEC_ID_VP6A, CODEC_ID_AMV, CODEC_ID_VB, + CODEC_ID_MPEGVIDEO_VDPAU, + CODEC_ID_H264_VDPAU, + CODEC_ID_VC1_VDPAU, + CODEC_ID_WMV3_VDPAU, /* various PCM "codecs" */ CODEC_ID_PCM_S16LE= 0x10000, @@ -461,6 +465,8 @@ * This can be used to prevent truncation of the last audio samples. */ #define CODEC_CAP_SMALL_LAST_FRAME 0x0040 +/* Codec can export data for HW decoding (VDPAU). */ +#define CODEC_CAP_HWACCEL_VDPAU 0x0080 //The following defines may change, don't expect compatibility if you use them. #define MB_TYPE_INTRA4x4 0x0001 @@ -747,12 +753,13 @@ #define FF_BUFFER_TYPE_COPY 8 ///< Just a (modified) copy of some other buffer, don't deallocate anything. -#define FF_I_TYPE 1 // Intra -#define FF_P_TYPE 2 // Predicted -#define FF_B_TYPE 3 // Bi-dir predicted -#define FF_S_TYPE 4 // S(GMC)-VOP MPEG4 -#define FF_SI_TYPE 5 -#define FF_SP_TYPE 6 +#define FF_I_TYPE 1 ///< Intra +#define FF_P_TYPE 2 ///< Predicted +#define FF_B_TYPE 3 ///< Bi-dir predicted +#define FF_S_TYPE 4 ///< S(GMC)-VOP MPEG4 +#define FF_SI_TYPE 5 ///< Switching Intra +#define FF_SP_TYPE 6 ///< Switching Predicted +#define FF_BI_TYPE 7 #define FF_BUFFER_HINTS_VALID 0x01 // Buffer hints value is meaningful (if 0 ignore). #define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer. 
@@ -2199,6 +2206,13 @@ * - decoding: set by decoder */ void *dvdv; /* This is actually a pointer to a DVDV_CurPtrs */ + + /** + * VDPAU Acceleration + * - encoding: forbidden + * - decoding: set by decoder + */ + int vdpau_acceleration; } AVCodecContext; /** @@ -2823,7 +2837,7 @@ } AVCodecParserContext; typedef struct AVCodecParser { - int codec_ids[5]; /* several codec IDs are permitted */ + int codec_ids[6]; /* several codec IDs are permitted */ int priv_data_size; int (*parser_init)(AVCodecParserContext *s); int (*parser_parse)(AVCodecParserContext *s, diff -Naur --exclude=.svn mythtv.ori/libs/libavcodec/h263dec.c mythtv/libs/libavcodec/h263dec.c --- mythtv.ori/libs/libavcodec/h263dec.c 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libavcodec/h263dec.c 2009-02-10 14:01:34.000000000 +1100 @@ -92,6 +92,8 @@ break; case CODEC_ID_VC1: case CODEC_ID_WMV3: + case CODEC_ID_VC1_VDPAU: + case CODEC_ID_WMV3_VDPAU: s->h263_msmpeg4 = 1; s->h263_pred = 1; s->msmpeg4_version=6; diff -Naur --exclude=.svn mythtv.ori/libs/libavcodec/h264.c mythtv/libs/libavcodec/h264.c --- mythtv.ori/libs/libavcodec/h264.c 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libavcodec/h264.c 2009-02-10 14:01:34.000000000 +1100 @@ -72,6 +72,9 @@ static VLC_TYPE run7_vlc_table[96][2]; static const int run7_vlc_table_size = 96; +extern int VDPAU_h264_add_data_chunk(H264Context *h, const uint8_t *buf, int buf_size); +extern int VDPAU_h264_picture_complete(H264Context *h); + static void svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp); static void svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp, int dc); static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize); @@ -101,6 +104,16 @@ {0,2,0,2,7,10,7,10} }; +static const enum PixelFormat pixfmt_vdpau_h264_baseline_420[] = { + PIX_FMT_VDPAU_H264_BASELINE, + PIX_FMT_NONE}; +static const enum PixelFormat pixfmt_vdpau_h264_main_420[] 
= { + PIX_FMT_VDPAU_H264_MAIN, + PIX_FMT_NONE}; +static const enum PixelFormat pixfmt_vdpau_h264_high_420[] = { + PIX_FMT_VDPAU_H264_HIGH, + PIX_FMT_NONE}; + static void fill_caches(H264Context *h, int mb_type, int for_deblock){ MpegEncContext * const s = &h->s; const int mb_xy= h->mb_xy; @@ -116,7 +129,8 @@ if(for_deblock && (h->slice_num == 1 || h->slice_table[mb_xy] == h->slice_table[top_xy]) && !FRAME_MBAFF) return; - //wow what a mess, why didn't they simplify the interlacing&intra stuff, i can't imagine that these complex rules are worth it + /* Wow, what a mess, why didn't they simplify the interlacing & intra + * stuff, I can't imagine that these complex rules are worth it. */ topleft_xy = top_xy - 1; topright_xy= top_xy + 1; @@ -2226,10 +2240,8 @@ s->quarter_sample = 1; s->low_delay= 1; - if(avctx->codec_id == CODEC_ID_SVQ3) - avctx->pix_fmt= PIX_FMT_YUVJ420P; - else - avctx->pix_fmt= PIX_FMT_YUV420P; + // Set in decode_postinit() once initial parsing is complete + avctx->pix_fmt = PIX_FMT_NONE; decode_init_vlc(); @@ -2247,6 +2259,35 @@ return 0; } +static int decode_postinit(H264Context *h, SPS *sps){ + AVCodecContext * const avctx= h->s.avctx; + + if (avctx->pix_fmt != PIX_FMT_NONE){ + return 0; + } + + if (avctx->vdpau_acceleration) { + if(h->s.chroma_format >= 2) { + return -2; + } + if (sps->profile_idc == 66) { + avctx->pix_fmt = avctx->get_format(avctx, pixfmt_vdpau_h264_baseline_420); + } else if (sps->profile_idc == 77) { + avctx->pix_fmt = avctx->get_format(avctx, pixfmt_vdpau_h264_main_420); + } else if (sps->profile_idc == 100) { + avctx->pix_fmt = avctx->get_format(avctx, pixfmt_vdpau_h264_high_420); + } else { + return -2; + } + } else if (avctx->codec_id == CODEC_ID_SVQ3) { + avctx->pix_fmt= PIX_FMT_YUVJ420P; + } else { + avctx->pix_fmt= PIX_FMT_YUV420P; + } + + return 0; +} + static int frame_start(H264Context *h){ MpegEncContext * const s = &h->s; int i; @@ -7145,6 +7186,10 @@ ((const 
char*[]){"Gray","420","422","444"})[sps->chroma_format_idc] ); } + + if (decode_postinit(h, sps) < 0) + return -1; + return 0; } @@ -7277,7 +7322,9 @@ H264Context *hx; int i; - if(context_count == 1) { + if(avctx->vdpau_acceleration) { + return; + } else if(context_count == 1) { decode_slice(avctx, h); } else { for(i = 1; i < context_count; i++) { @@ -7404,8 +7451,26 @@ && (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc) && (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=B_TYPE) && (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==I_TYPE) - && avctx->skip_frame < AVDISCARD_ALL) - context_count++; + && avctx->skip_frame < AVDISCARD_ALL) { +#ifdef HAVE_VDPAU + if (avctx->vdpau_acceleration) { + if(h->is_avc) { + static const uint8_t start_code[] = {0x00, 0x00, 0x01}; + VDPAU_h264_add_data_chunk(h, start_code, sizeof(start_code)); + VDPAU_h264_add_data_chunk(h, &buf[buf_index - consumed], consumed ); + } + else + { + // +/-3: Add back 00 00 01 to start of data + VDPAU_h264_add_data_chunk(h, &buf[buf_index - consumed - 3], consumed + 3); + } + } + else +#endif + { + context_count++; + } + } break; case NAL_DPA: init_get_bits(&hx->s.gb, ptr, bit_length); @@ -7600,13 +7665,20 @@ s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_H264; s->current_picture_ptr->pict_type= s->pict_type; + h->prev_frame_num_offset= h->frame_num_offset; + h->prev_frame_num= h->frame_num; + if(!s->dropable) { - execute_ref_pic_marking(h, h->mmco, h->mmco_index); h->prev_poc_msb= h->poc_msb; h->prev_poc_lsb= h->poc_lsb; + execute_ref_pic_marking(h, h->mmco, h->mmco_index); } - h->prev_frame_num_offset= h->frame_num_offset; - h->prev_frame_num= h->frame_num; + +#ifdef HAVE_VDPAU + if (avctx->vdpau_acceleration) { + VDPAU_h264_picture_complete(h); + } +#endif /* * FIXME: Error handling code does not seem to support interlaced @@ -7620,8 +7692,11 @@ * past end by one (callers fault) and resync_mb_y != 0 * causes problems for the first MB line, too. 
*/ - if (!FIELD_PICTURE) - ff_er_frame_end(s); +#ifdef HAVE_VDPAU + if (!avctx->vdpau_acceleration) +#endif + if (!FIELD_PICTURE) + ff_er_frame_end(s); MPV_frame_end(s); @@ -7933,4 +8008,34 @@ .flush= flush_dpb, }; +#ifdef HAVE_VDPAU +static int h264_vdpau_decode_init(AVCodecContext *avctx){ + if( avctx->thread_count > 1) + return -1; + if( !(avctx->slice_flags & SLICE_FLAG_CODED_ORDER) ) + return -1; + if( !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD) ){ + dprintf(avctx, "h264.c: VDPAU decoder does not set SLICE_FLAG_ALLOW_FIELD\n"); + } + decode_init(avctx); + + avctx->vdpau_acceleration = 1; + + return 0; +} + +AVCodec h264_vdpau_decoder = { + "h264_vdpau", + CODEC_TYPE_VIDEO, + CODEC_ID_H264_VDPAU, + sizeof(H264Context), + h264_vdpau_decode_init, + NULL, + decode_end, + decode_frame, + CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU, + .flush= flush_dpb, +}; +#endif + #include "svq3.c" diff -Naur --exclude=.svn mythtv.ori/libs/libavcodec/h264_parser.c mythtv/libs/libavcodec/h264_parser.c --- mythtv.ori/libs/libavcodec/h264_parser.c 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libavcodec/h264_parser.c 2009-02-10 14:01:34.000000000 +1100 @@ -139,7 +139,9 @@ AVCodecParser h264_parser = { - { CODEC_ID_H264 }, + { CODEC_ID_H264, + CODEC_ID_H264_VDPAU + }, sizeof(H264Context), NULL, h264_parse, diff -Naur --exclude=.svn mythtv.ori/libs/libavcodec/imgconvert.c mythtv/libs/libavcodec/imgconvert.c --- mythtv.ori/libs/libavcodec/imgconvert.c 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libavcodec/imgconvert.c 2009-02-10 14:01:34.000000000 +1100 @@ -266,6 +266,33 @@ [PIX_FMT_XVMC_MPEG2_IDCT] = { .name = "xvmcidct", }, + [PIX_FMT_VDPAU_MPEG1] = { + .name = "vdpau_mpeg1", + }, + [PIX_FMT_VDPAU_MPEG2_SIMPLE] = { + .name = "vdpau_mpeg2_simple", + }, + [PIX_FMT_VDPAU_MPEG2_MAIN] = { + .name = "vdpau_mpeg2_main", + }, + [PIX_FMT_VDPAU_H264_BASELINE] = { + .name = "vdpau_h264_baseline", + }, + [PIX_FMT_VDPAU_H264_MAIN] = { + .name = "vdpau_h264_main", + 
}, + [PIX_FMT_VDPAU_H264_HIGH] = { + .name = "vdpau_h264_high", + }, + [PIX_FMT_VDPAU_VC1_SIMPLE] = { + .name = "vdpau_vc1_simple", + }, + [PIX_FMT_VDPAU_VC1_MAIN] = { + .name = "vdpau_vc1_main", + }, + [PIX_FMT_VDPAU_VC1_ADVANCED] = { + .name = "vdpau_vc1_advanced", + }, [PIX_FMT_UYYVYY411] = { .name = "uyyvyy411", .nb_channels = 1, diff -Naur --exclude=.svn mythtv.ori/libs/libavcodec/libavcodec.pro mythtv/libs/libavcodec/libavcodec.pro --- mythtv.ori/libs/libavcodec/libavcodec.pro 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libavcodec/libavcodec.pro 2009-02-10 14:01:34.000000000 +1100 @@ -372,6 +372,8 @@ DEFINES += HAVE_DVDV } +contains( HAVE_VDPAU, yes ) { SOURCES *= vdpauvideo.c } + !contains( CONFIG_SWSCALER, yes ) { SOURCES *= imgresample.c } contains( HAVE_GPROF, yes ) { diff -Naur --exclude=.svn mythtv.ori/libs/libavcodec/mpeg12.c mythtv/libs/libavcodec/mpeg12.c --- mythtv.ori/libs/libavcodec/mpeg12.c 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libavcodec/mpeg12.c 2009-02-10 14:01:34.000000000 +1100 @@ -88,10 +88,23 @@ static const enum PixelFormat pixfmt_yuv_420[]= {PIX_FMT_YUV420P,-1}; static const enum PixelFormat pixfmt_yuv_422[]= {PIX_FMT_YUV422P,-1}; static const enum PixelFormat pixfmt_yuv_444[]= {PIX_FMT_YUV444P,-1}; + +extern int VDPAU_mpeg_field_start(MpegEncContext *s); +extern void VDPAU_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf, int buf_size, int slice_count); + static const enum PixelFormat pixfmt_xvmc_mpg2_420[] = { PIX_FMT_XVMC_MPEG2_IDCT, PIX_FMT_XVMC_MPEG2_MC, - -1}; + PIX_FMT_NONE}; +static const enum PixelFormat pixfmt_vdpau_mpg1_420[] = { + PIX_FMT_VDPAU_MPEG1, + PIX_FMT_NONE}; +static const enum PixelFormat pixfmt_vdpau_mpg2simple_420[] = { + PIX_FMT_VDPAU_MPEG2_SIMPLE, + PIX_FMT_NONE}; +static const enum PixelFormat pixfmt_vdpau_mpg2main_420[] = { + PIX_FMT_VDPAU_MPEG2_MAIN, + PIX_FMT_NONE}; uint8_t ff_mpeg12_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3]; @@ -1380,8 +1393,40 @@ } } -//Call 
this function when we know all parameters -//it may be called in different places for mpeg1 and mpeg2 +static void mpeg_set_pixelformat(AVCodecContext *avctx){ + Mpeg1Context *s1 = avctx->priv_data; + MpegEncContext *s = &s1->mpeg_enc_ctx; + + if(avctx->vdpau_acceleration){ + if(s->chroma_format >= 2){ + return; + } + if(avctx->sub_id == 1){ + avctx->pix_fmt = avctx->get_format(avctx,pixfmt_vdpau_mpg1_420); + }else{ + if(avctx->profile == 5){ + avctx->pix_fmt = avctx->get_format(avctx,pixfmt_vdpau_mpg2simple_420); + }else if(avctx->profile == 4){ + avctx->pix_fmt = avctx->get_format(avctx,pixfmt_vdpau_mpg2main_420); + }else{ + return; + } + } + }else if(avctx->xvmc_acceleration){ + avctx->pix_fmt = avctx->get_format(avctx,pixfmt_xvmc_mpg2_420); + }else{ + if(s->chroma_format < 2){ + avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_420); + }else if(s->chroma_format == 2){ + avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_422); + }else if(s->chroma_format > 2){ + avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_444); + } + } +} + +/* Call this function when we know all parameters. + * It may be called in different places for MPEG-1 and MPEG-2. 
*/ static int mpeg_decode_postinit(AVCodecContext *avctx){ Mpeg1Context *s1 = avctx->priv_data; MpegEncContext *s = &s1->mpeg_enc_ctx; @@ -1452,31 +1497,23 @@ s->avctx->sample_aspect_ratio= ff_mpeg2_aspect[s->aspect_ratio_info]; } - }//mpeg2 + }//MPEG-2 + + mpeg_set_pixelformat(avctx); - if(avctx->xvmc_acceleration){ - avctx->pix_fmt = avctx->get_format(avctx,pixfmt_xvmc_mpg2_420); - }else{ - if(s->chroma_format < 2){ - avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_420); - }else - if(s->chroma_format == 2){ - avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_422); - }else - if(s->chroma_format > 2){ - avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_444); - } - } //until then pix_fmt may be changed right after codec init if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT ) if( avctx->idct_algo == FF_IDCT_AUTO ) avctx->idct_algo = FF_IDCT_SIMPLE; + if( avctx->vdpau_acceleration) + avctx->idct_algo = FF_IDCT_SIMPLE; + if( avctx->xvmc_vld_hwslice == 1) avctx->idct_algo = FF_IDCT_LIBMPEG2MMX; - //quantization matrixes may need reordering - //if dct permutation is changed + /* Quantization matrices may need reordering + * if DCT permutation is changed. */ memcpy(old_permutation,s->dsp.idct_permutation,64*sizeof(uint8_t)); if (MPV_common_init(s) < 0) @@ -1811,6 +1848,11 @@ XVMC_VLD_field_start(s, avctx); #endif +#ifdef HAVE_VDPAU + if(s->avctx->vdpau_acceleration) + VDPAU_mpeg_field_start(s); +#endif + return 0; } @@ -2113,7 +2155,10 @@ s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_MPEG2; - ff_er_frame_end(s); +#ifdef HAVE_VDPAU + if(!s->avctx->vdpau_acceleration) +#endif + ff_er_frame_end(s); MPV_frame_end(s); @@ -2258,16 +2303,15 @@ avctx->has_b_frames= 0; //true? 
s->low_delay= 1; - if(avctx->xvmc_acceleration){ - avctx->pix_fmt = avctx->get_format(avctx,pixfmt_xvmc_mpg2_420); - }else{ - avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_420); - } + mpeg_set_pixelformat(avctx); if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT ) if( avctx->idct_algo == FF_IDCT_AUTO ) avctx->idct_algo = FF_IDCT_SIMPLE; + if( avctx->vdpau_acceleration == 1) + avctx->idct_algo = FF_IDCT_SIMPLE; + if( avctx->xvmc_vld_hwslice == 1) avctx->idct_algo = FF_IDCT_LIBMPEG2MMX; @@ -2555,6 +2599,14 @@ for(i=0; i<s->slice_count; i++) s2->error_count += s2->thread_context[i]->error_count; } + +#ifdef HAVE_VDPAU + if (avctx->vdpau_acceleration) { + /* Fills mpeg12 picture information before returning from libavcodec. */ + VDPAU_mpeg_picture_complete(s2, buf, buf_size, s->slice_count); + } +#endif + if (slice_end(avctx, picture)) { if(s2->last_picture_ptr || s2->low_delay) //FIXME merge with the stuff in mpeg_decode_slice *data_size = sizeof(AVPicture); @@ -2631,6 +2683,11 @@ return -1; } + if (avctx->vdpau_acceleration) { + s->slice_count++; + break; + } + if(avctx->thread_count > 1){ int threshold= (s2->mb_height*s->slice_count + avctx->thread_count/2) / avctx->thread_count; if(threshold <= mb_y){ @@ -2818,6 +2875,38 @@ #endif +#ifdef HAVE_VDPAU +static int mpeg_vdpau_decode_init(AVCodecContext *avctx){ + if( avctx->thread_count > 1) + return -1; + if( !(avctx->slice_flags & SLICE_FLAG_CODED_ORDER) ) + return -1; + if( !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD) ){ + dprintf(avctx, "mpeg12.c: VDPAU decoder does not set SLICE_FLAG_ALLOW_FIELD\n"); + } + mpeg_decode_init(avctx); + + // Set in mpeg_decode_postinit() once initial parsing is complete + avctx->pix_fmt = PIX_FMT_NONE; + avctx->vdpau_acceleration = 1; + + return 0; +} + +AVCodec mpeg_vdpau_decoder = { + "mpegvideo_vdpau", + CODEC_TYPE_VIDEO, + CODEC_ID_MPEGVIDEO_VDPAU, + sizeof(Mpeg1Context), + mpeg_vdpau_decode_init, + NULL, + mpeg_decode_end, + mpeg_decode_frame, + CODEC_CAP_DR1 | 
CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY, + .flush= ff_mpeg_flush, +}; +#endif + /* this is ugly i know, but the alternative is too make hundreds of vars global and prefix them with ff_mpeg1_ which is far uglier. */ diff -Naur --exclude=.svn mythtv.ori/libs/libavcodec/mpegvideo.c mythtv/libs/libavcodec/mpegvideo.c --- mythtv.ori/libs/libavcodec/mpegvideo.c 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libavcodec/mpegvideo.c 2009-02-10 14:01:34.000000000 +1100 @@ -63,6 +63,7 @@ void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c; +extern int VDPAU_mpeg_field_start(MpegEncContext *s); /* enable all paranoid tests for rounding, overflows, etc... */ //#define PARANOID @@ -1031,6 +1032,10 @@ XVMC_field_end(s); }else #endif +#ifdef HAVE_VDPAU + if(s->avctx->vdpau_acceleration){ + }else +#endif if(s->unrestricted_mv && s->current_picture.reference && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) { draw_edges(s->current_picture.data[0], s->linesize , s->h_edge_pos , s->v_edge_pos , EDGE_WIDTH ); draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2); diff -Naur --exclude=.svn mythtv.ori/libs/libavcodec/mpegvideo_parser.c mythtv/libs/libavcodec/mpegvideo_parser.c --- mythtv.ori/libs/libavcodec/mpegvideo_parser.c 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libavcodec/mpegvideo_parser.c 2009-02-10 14:01:34.000000000 +1100 @@ -181,6 +181,7 @@ CODEC_ID_MPEG2VIDEO_DVDV, CODEC_ID_MPEG2VIDEO_XVMC, CODEC_ID_MPEG2VIDEO_XVMC_VLD, + CODEC_ID_MPEGVIDEO_VDPAU }, sizeof(ParseContext1), NULL, diff -Naur --exclude=.svn mythtv.ori/libs/libavcodec/myth_utils.c mythtv/libs/libavcodec/myth_utils.c --- mythtv.ori/libs/libavcodec/myth_utils.c 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libavcodec/myth_utils.c 2009-02-10 14:01:34.000000000 +1100 @@ -41,6 +41,7 @@ return "MPEG2VIDEO_XVMC"; case CODEC_ID_MPEG2VIDEO_XVMC_VLD: return "MPEG2VIDEO_XVMC_VLD"; + case 
CODEC_ID_MPEGVIDEO_VDPAU: return "MPEGVIDEO_VDPAU"; case CODEC_ID_H261: return "H261"; case CODEC_ID_H263: return "H263"; case CODEC_ID_RV10: return "RV10"; @@ -65,6 +66,7 @@ case CODEC_ID_HUFFYUV: return "HUFFYUV"; case CODEC_ID_CYUV: return "CYUV"; case CODEC_ID_H264: return "H264"; + case CODEC_ID_H264_VDPAU: return "H264_VDPAU"; case CODEC_ID_INDEO3: return "INDEO3"; case CODEC_ID_VP3: return "VP3"; case CODEC_ID_THEORA: return "THEORA"; @@ -111,7 +113,9 @@ case CODEC_ID_RV30: return "RV30"; case CODEC_ID_RV40: return "RV40"; case CODEC_ID_VC1: return "VC1"; + case CODEC_ID_VC1_VDPAU: return "VC1_VDPAU"; case CODEC_ID_WMV3: return "WMV3"; + case CODEC_ID_WMV3_VDPAU: return "WMV3_VDPAU"; case CODEC_ID_LOCO: return "LOCO"; case CODEC_ID_WNV1: return "WNV1"; case CODEC_ID_AASC: return "AASC"; diff -Naur --exclude=.svn mythtv.ori/libs/libavcodec/parser.c mythtv/libs/libavcodec/parser.c --- mythtv.ori/libs/libavcodec/parser.c 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libavcodec/parser.c 2009-02-10 14:01:34.000000000 +1100 @@ -44,7 +44,8 @@ parser->codec_ids[1] == codec_id || parser->codec_ids[2] == codec_id || parser->codec_ids[3] == codec_id || - parser->codec_ids[4] == codec_id) + parser->codec_ids[4] == codec_id || + parser->codec_ids[5] == codec_id) goto found; } return NULL; diff -Naur --exclude=.svn mythtv.ori/libs/libavcodec/utils.c mythtv/libs/libavcodec/utils.c --- mythtv.ori/libs/libavcodec/utils.c 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libavcodec/utils.c 2009-02-10 14:01:34.000000000 +1100 @@ -673,6 +673,7 @@ {"context", "context model", OFFSET(context_model), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E}, {"slice_flags", NULL, OFFSET(slice_flags), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"xvmc_acceleration", NULL, OFFSET(xvmc_acceleration), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, +{"vdpau_acceleration", NULL, OFFSET(vdpau_acceleration), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX}, {"mbd", "macroblock decision algorithm 
(high quality mode)", OFFSET(mb_decision), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "mbd"}, {"simple", "use mbcmp (default)", 0, FF_OPT_TYPE_CONST, FF_MB_DECISION_SIMPLE, INT_MIN, INT_MAX, V|E, "mbd"}, {"bits", "use fewest bits", 0, FF_OPT_TYPE_CONST, FF_MB_DECISION_BITS, INT_MIN, INT_MAX, V|E, "mbd"}, diff -Naur --exclude=.svn mythtv.ori/libs/libavcodec/vc1.c mythtv/libs/libavcodec/vc1.c --- mythtv.ori/libs/libavcodec/vc1.c 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libavcodec/vc1.c 2009-02-10 14:01:34.000000000 +1100 @@ -41,8 +41,22 @@ #define MB_INTRA_VLC_BITS 9 #define DC_VLC_BITS 9 #define AC_VLC_BITS 9 + +extern int VDPAU_vc1_decode_picture(MpegEncContext *s, AVCodecContext *avctx, VC1Context *v, const uint8_t *buf, int buf_size); + static const uint16_t table_mb_intra[64][2]; +#ifdef HAVE_VDPAU +static const enum PixelFormat pixfmt_vdpau_vc1_simple_420[] = { + PIX_FMT_VDPAU_VC1_SIMPLE, + PIX_FMT_NONE}; +static const enum PixelFormat pixfmt_vdpau_vc1_main_420[] = { + PIX_FMT_VDPAU_VC1_MAIN, + PIX_FMT_NONE}; +static const enum PixelFormat pixfmt_vdpau_vc1_advanced_420[] = { + PIX_FMT_VDPAU_VC1_ADVANCED, + PIX_FMT_NONE}; +#endif static inline int decode210(GetBitContext *gb){ if (get_bits1(gb)) @@ -762,6 +776,29 @@ } } +#ifdef HAVE_VDPAU +static int decode_postinit(VC1Context *v, AVCodecContext *avctx) +{ + if (avctx->pix_fmt != PIX_FMT_NONE){ + return 0; + } + + if (avctx->vdpau_acceleration) { // VC1 + if (v->profile == 0) { + avctx->pix_fmt = avctx->get_format(avctx, pixfmt_vdpau_vc1_simple_420); + } else if (v->profile == 1) { + avctx->pix_fmt = avctx->get_format(avctx, pixfmt_vdpau_vc1_main_420); + } else if (v->profile == 3) { + avctx->pix_fmt = avctx->get_format(avctx, pixfmt_vdpau_vc1_advanced_420); + } else { + return -2; + } + } + + return 0; +} +#endif + static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb); /** @@ -932,8 +969,24 @@ if(get_bits1(gb)) { //Display Info - decoding is not affected by it int w, h, ar = 
0; av_log(v->s.avctx, AV_LOG_DEBUG, "Display extended info:\n"); - v->s.avctx->width = v->s.width = w = get_bits(gb, 14) + 1; - v->s.avctx->height = v->s.height = h = get_bits(gb, 14) + 1; + // FIXME: The w/h parsed here are the *display* width/height, not the + // coded width/height. Ideally, we should make the commented + // assignments below, but that causes problems: + // * The SW decoder in this file experiences errors, because it + // assumes these assigned values are the coded size: + // [vc1 @ 0x86f2130]concealing 150 DC, 150 AC, 150 MV errors + // * VDPAU also assumes these are the coded size, since this is the + // only size passed to vo_vdpau.c:config(). This causes errors + // during the decode process. + // However, simply removing these assignments is not the complete fix, + // because without them, the stream is displayed at its coded size, + // not this requested display size. Ideally, setting: + // sample_aspect_ratio = (AVRational){w, h} + // in the case when ar is not present/set would persuade other modules + // to scale to this requested size. However, sample_aspect_ratio + // appears to be completely ignored elsewhere. 
+ /*v->s.avctx->width = v->s.width =*/ w = get_bits(gb, 14) + 1; + /*v->s.avctx->height = v->s.height =*/ h = get_bits(gb, 14) + 1; av_log(v->s.avctx, AV_LOG_DEBUG, "Display dimensions: %ix%i\n", w, h); if(get_bits1(gb)) ar = get_bits(gb, 4); @@ -984,13 +1037,13 @@ static int decode_entry_point(AVCodecContext *avctx, GetBitContext *gb) { VC1Context *v = avctx->priv_data; - int i, blink, clentry, refdist; + int i, blink, clentry; av_log(avctx, AV_LOG_DEBUG, "Entry point: %08X\n", show_bits_long(gb, 32)); blink = get_bits1(gb); // broken link clentry = get_bits1(gb); // closed entry v->panscanflag = get_bits1(gb); - refdist = get_bits1(gb); // refdist flag + v->refdist_flag = get_bits1(gb); v->s.loop_filter = get_bits1(gb); v->fastuvmc = get_bits1(gb); v->extended_mv = get_bits1(gb); @@ -1011,20 +1064,22 @@ } if(v->extended_mv) v->extended_dmv = get_bits1(gb); - if(get_bits1(gb)) { + v->range_mapy_flag = get_bits1(gb); + if(v->range_mapy_flag) { av_log(avctx, AV_LOG_ERROR, "Luma scaling is not supported, expect wrong picture\n"); - skip_bits(gb, 3); // Y range, ignored for now + v->range_mapy = get_bits(gb, 3); } - if(get_bits1(gb)) { + v->range_mapuv_flag = get_bits1(gb); + if(v->range_mapuv_flag) { av_log(avctx, AV_LOG_ERROR, "Chroma scaling is not supported, expect wrong picture\n"); - skip_bits(gb, 3); // UV range, ignored for now + v->range_mapuv = get_bits(gb, 3); } av_log(avctx, AV_LOG_DEBUG, "Entry point info:\n" "BrokenLink=%i, ClosedEntry=%i, PanscanFlag=%i\n" "RefDist=%i, Postproc=%i, FastUVMC=%i, ExtMV=%i\n" "DQuant=%i, VSTransform=%i, Overlap=%i, Qmode=%i\n", - blink, clentry, v->panscanflag, refdist, v->s.loop_filter, + blink, clentry, v->panscanflag, v->refdist_flag, v->s.loop_filter, v->fastuvmc, v->extended_mv, v->dquant, v->vstransform, v->overlap, v->quantizer_mode); return 0; @@ -1320,6 +1375,9 @@ if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0; + if(v->postprocflag) + v->postproc = get_bits(gb, 2); + switch(v->s.pict_type) 
{ case I_TYPE: case BI_TYPE: @@ -1339,8 +1397,6 @@ } break; case P_TYPE: - if(v->postprocflag) - v->postproc = get_bits1(gb); if (v->extended_mv) v->mvrange = get_unary(gb, 0, 3); else v->mvrange = 0; v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13 @@ -1430,8 +1486,6 @@ } break; case B_TYPE: - if(v->postprocflag) - v->postproc = get_bits1(gb); if (v->extended_mv) v->mvrange = get_unary(gb, 0, 3); else v->mvrange = 0; v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13 @@ -3843,7 +3897,7 @@ avctx->coded_width = avctx->width; avctx->coded_height = avctx->height; - if (avctx->codec_id == CODEC_ID_WMV3) + if ((avctx->codec_id == CODEC_ID_WMV3) || (avctx->codec_id == CODEC_ID_WMV3_VDPAU)) { int count = 0; @@ -3953,6 +4007,9 @@ MpegEncContext *s = &v->s; AVFrame *pict = data; uint8_t *buf2 = NULL; +#ifdef HAVE_VDPAU + uint8_t *buf_vdpau = buf; +#endif /* no supplementary picture */ if (buf_size == 0) { @@ -3974,8 +4031,14 @@ s->current_picture_ptr= &s->picture[i]; } +#ifdef HAVE_VDPAU + // pxt_fmt calculation for VDPAU. 
+ if (decode_postinit(v, avctx) < 0) + return -1; +#endif + //for advanced profile we may need to parse and unescape data - if (avctx->codec_id == CODEC_ID_VC1) { + if ((avctx->codec_id == CODEC_ID_VC1) || (avctx->codec_id == CODEC_ID_VC1_VDPAU)) { int buf_size2 = 0; buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); @@ -3990,6 +4053,9 @@ if(size <= 0) continue; switch(AV_RB32(start)){ case VC1_CODE_FRAME: +#ifdef HAVE_VDPAU + buf_vdpau = start; +#endif buf_size2 = vc1_unescape_buffer(start + 4, size, buf2); break; case VC1_CODE_ENTRYPOINT: /* it should be before frame data */ @@ -4009,6 +4075,7 @@ divider = find_next_marker(buf, buf + buf_size); if((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD){ av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n"); + av_free(buf2); return -1; } @@ -4074,17 +4141,36 @@ return -1; } +#ifdef HAVE_VDPAU + // MPV_frame_start() calls to get_buffer/videoSurfaces. Now we call + // VDPAU_vc1_field_start where picture-parameters are filled. + // VDPAU_vc1_picture_complete calls to vdpau_decoder_render. 
+ + if (avctx->vdpau_acceleration) { + if (VDPAU_vc1_decode_picture(s, avctx, v, buf_vdpau, (buf + buf_size) - buf_vdpau) < 0) { + av_free(buf2); + return -1; + } + } +#endif + s->me.qpel_put= s->dsp.put_qpel_pixels_tab; s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab; - ff_er_frame_start(s); +#ifdef HAVE_VDPAU + if (!avctx->vdpau_acceleration) { +#endif + ff_er_frame_start(s); - v->bits = buf_size * 8; - vc1_decode_blocks(v); + v->bits = buf_size * 8; + vc1_decode_blocks(v); //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), buf_size*8); // if(get_bits_count(&s->gb) > buf_size * 8) // return -1; - ff_er_frame_end(s); + ff_er_frame_end(s); +#ifdef HAVE_VDPAU + } +#endif MPV_frame_end(s); @@ -4154,3 +4240,46 @@ CODEC_CAP_DELAY, NULL }; + +#ifdef HAVE_VDPAU +static int vc1_vdpau_decode_init(AVCodecContext *avctx){ + if( avctx->thread_count > 1) + return -1; + if( !(avctx->slice_flags & SLICE_FLAG_CODED_ORDER) ) + return -1; + if( !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD) ){ + dprintf(avctx, "vc1.c: VDPAU decoder does not set SLICE_FLAG_ALLOW_FIELD\n"); + } + avctx->vdpau_acceleration = 1; + vc1_decode_init(avctx); + avctx->pix_fmt = PIX_FMT_NONE; + + return 0; +} + +AVCodec wmv3_vdpau_decoder = { + "wmv3_vdpau", + CODEC_TYPE_VIDEO, + CODEC_ID_WMV3_VDPAU, + sizeof(VC1Context), + vc1_vdpau_decode_init, + NULL, + vc1_decode_end, + vc1_decode_frame, + CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU, + NULL, +}; + +AVCodec vc1_vdpau_decoder = { + "vc1_vdpau", + CODEC_TYPE_VIDEO, + CODEC_ID_VC1_VDPAU, + sizeof(VC1Context), + vc1_vdpau_decode_init, + NULL, + vc1_decode_end, + vc1_decode_frame, + CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU, + NULL, +}; +#endif diff -Naur --exclude=.svn mythtv.ori/libs/libavcodec/vc1.h mythtv/libs/libavcodec/vc1.h --- mythtv.ori/libs/libavcodec/vc1.h 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libavcodec/vc1.h 2009-02-10 14:01:34.000000000 +1100 @@ -183,6 +183,7 @@ int interlace; 
///< Progressive/interlaced (RPTFTM syntax element) int tfcntrflag; ///< TFCNTR present int panscanflag; ///< NUMPANSCANWIN, TOPLEFT{X,Y}, BOTRIGHT{X,Y} present + int refdist_flag; ///< int extended_dmv; ///< Additional extended dmv range at P/B frame-level int color_prim; ///< 8bits, chroma coordinates of the color primaries int transfer_char; ///< 8bits, Opto-electronic transfer characteristics diff -Naur --exclude=.svn mythtv.ori/libs/libavcodec/vc1_parser.c mythtv/libs/libavcodec/vc1_parser.c --- mythtv.ori/libs/libavcodec/vc1_parser.c 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libavcodec/vc1_parser.c 2009-02-10 14:01:34.000000000 +1100 @@ -109,7 +109,9 @@ } AVCodecParser vc1_parser = { - { CODEC_ID_VC1 }, + { CODEC_ID_VC1, + CODEC_ID_VC1_VDPAU + }, sizeof(ParseContext1), NULL, vc1_parse, diff -Naur --exclude=.svn mythtv.ori/libs/libavcodec/vdpau_render.h mythtv/libs/libavcodec/vdpau_render.h --- mythtv.ori/libs/libavcodec/vdpau_render.h 1970-01-01 10:00:00.000000000 +1000 +++ mythtv/libs/libavcodec/vdpau_render.h 2009-02-10 14:01:34.000000000 +1100 @@ -0,0 +1,61 @@ +/* + * Video Decode and Presentation API for UNIX (VDPAU) is used for + * HW decode acceleration for MPEG-1/2, H.264 and VC-1. + * + * Copyright (C) 2008 NVIDIA. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef FFMPEG_VDPAU_RENDER_H +#define FFMPEG_VDPAU_RENDER_H + +#include "vdpau/vdpau.h" +#include "vdpau/vdpau_x11.h" + +/** + * \brief The videoSurface is used for render. + */ +#define MP_VDPAU_STATE_USED_FOR_RENDER 1 + +/** + * \brief The videoSurface is needed for reference/prediction, + * codec manipulates this. + */ +#define MP_VDPAU_STATE_USED_FOR_REFERENCE 2 + +#define MP_VDPAU_RENDER_MAGIC 0x1DC8E14B + +typedef struct { + int magic; + + VdpVideoSurface surface; //used as rendered surface, never changed. + + int state; // Holds MP_VDPAU_STATE_* values + + union _VdpPictureInfo { + VdpPictureInfoMPEG1Or2 mpeg; + VdpPictureInfoH264 h264; + VdpPictureInfoVC1 vc1; + } info; + + int bitstreamBuffersAlloced; + int bitstreamBuffersUsed; + VdpBitstreamBuffer *bitstreamBuffers; +} vdpau_render_state_t; + +#endif /* FFMPEG_VDPAU_RENDER_H */ diff -Naur --exclude=.svn mythtv.ori/libs/libavcodec/vdpauvideo.c mythtv/libs/libavcodec/vdpauvideo.c --- mythtv.ori/libs/libavcodec/vdpauvideo.c 1970-01-01 10:00:00.000000000 +1000 +++ mythtv/libs/libavcodec/vdpauvideo.c 2009-02-10 14:01:34.000000000 +1100 @@ -0,0 +1,428 @@ +/* + * Video Decode and Presentation API for UNIX (VDPAU) is used for + * HW decode acceleration for MPEG-1/2, H.264 and VC-1. + * + * Copyright (c) 2008 NVIDIA. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <limits.h> +#include "avcodec.h" +#include "dsputil.h" +#include "mpegvideo.h" +#include "h264.h" +#include "vc1.h" + +#undef NDEBUG +#include <assert.h> + +#include "vdpau_render.h" + +#define ARSIZE(_x_) (sizeof(_x_) / sizeof((_x_)[0])) + +static void VDPAU_ensure_has_buffers(vdpau_render_state_t * render, int need_entries) +{ + int new_alloced; + + if (render->bitstreamBuffersAlloced >= need_entries) { + return; + } + + if (!render->bitstreamBuffersAlloced || !render->bitstreamBuffers) { + new_alloced = 4; + } + else { + new_alloced = render->bitstreamBuffersAlloced * 2; + } + + render->bitstreamBuffers = av_realloc( + render->bitstreamBuffers, + new_alloced * sizeof(render->bitstreamBuffers[0]) + ); + render->bitstreamBuffersAlloced = new_alloced; +} + +int VDPAU_mpeg_field_start(MpegEncContext *s) +{ + vdpau_render_state_t * render,* last, * next; + int i; + + render = (vdpau_render_state_t*)s->current_picture.data[2]; + assert(render != NULL); + assert(render->magic == MP_VDPAU_RENDER_MAGIC); + if ((render == NULL) || (render->magic != MP_VDPAU_RENDER_MAGIC)) { + return -1; // make sure that this is render packet + } + + /* fill VdpPictureInfoMPEG1Or2 struct */ + render->info.mpeg.picture_structure = s->picture_structure; + render->info.mpeg.picture_coding_type = s->pict_type; + render->info.mpeg.intra_dc_precision = s->intra_dc_precision; + render->info.mpeg.frame_pred_frame_dct = s->frame_pred_frame_dct; + render->info.mpeg.concealment_motion_vectors = s->concealment_motion_vectors; + 
render->info.mpeg.intra_vlc_format = s->intra_vlc_format; + render->info.mpeg.alternate_scan = s->alternate_scan; + render->info.mpeg.q_scale_type = s->q_scale_type; + render->info.mpeg.top_field_first = s->top_field_first; + render->info.mpeg.full_pel_forward_vector = s->full_pel[0]; // MPEG-1 only. Set 0 for MPEG-2 + render->info.mpeg.full_pel_backward_vector = s->full_pel[1]; // MPEG-1 only. Set 0 for MPEG-2 + render->info.mpeg.f_code[0][0] = s->mpeg_f_code[0][0]; // For MPEG-1 fill both horiz. & vert. + render->info.mpeg.f_code[0][1] = s->mpeg_f_code[0][1]; + render->info.mpeg.f_code[1][0] = s->mpeg_f_code[1][0]; + render->info.mpeg.f_code[1][1] = s->mpeg_f_code[1][1]; + for (i = 0; i < 64; ++i) { + render->info.mpeg.intra_quantizer_matrix[i] = s->intra_matrix[i]; + render->info.mpeg.non_intra_quantizer_matrix[i] = s->inter_matrix[i]; + } + + render->info.mpeg.forward_reference = VDP_INVALID_HANDLE; + render->info.mpeg.backward_reference = VDP_INVALID_HANDLE; + + switch(s->pict_type){ + case FF_I_TYPE: + return 0; // no prediction from other frames + case FF_B_TYPE: + next = (vdpau_render_state_t*)s->next_picture.data[2]; + assert(next != NULL); + assert(next->magic == MP_VDPAU_RENDER_MAGIC); + if ((next == NULL) || (next->magic != MP_VDPAU_RENDER_MAGIC)) { + return -1; + } + render->info.mpeg.backward_reference = next->surface; + // no return here, going to set forward prediction + case FF_P_TYPE: + last = (vdpau_render_state_t*)s->last_picture.data[2]; + assert(last->magic == MP_VDPAU_RENDER_MAGIC); + if (last->magic != MP_VDPAU_RENDER_MAGIC) { + return -1; + } + if (last == NULL) { // FIXME: Does this test make sense? 
+ last = render; // predict second field from the first + } + render->info.mpeg.forward_reference = last->surface; + return 0; + } + + return -1; +} + +int VDPAU_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf, int buf_size, int slice_count) +{ + vdpau_render_state_t * render; + + if (!(s->current_picture_ptr)) + return -1; + + render = (vdpau_render_state_t*)s->current_picture_ptr->data[2]; + assert(render != NULL); + assert(render->magic == MP_VDPAU_RENDER_MAGIC); + if ((render == NULL) || (render->magic != MP_VDPAU_RENDER_MAGIC)) { + return -1; // make sure that this is render packet + } + + VDPAU_ensure_has_buffers(render, 1); + + render->bitstreamBuffers[0].struct_version = VDP_BITSTREAM_BUFFER_VERSION; + render->bitstreamBuffers[0].bitstream_bytes = buf_size; + render->bitstreamBuffers[0].bitstream = buf; + render->bitstreamBuffersUsed = 1; + + render->info.mpeg.slice_count = slice_count; + + if (slice_count > 0) { + ff_draw_horiz_band(s, 0, s->avctx->height); + } + render->bitstreamBuffersUsed = 0; + + return 0; +} + +int VDPAU_h264_set_reference_frames(H264Context *h) +{ + MpegEncContext * s = &h->s; + vdpau_render_state_t * render, * render_ref; + VdpReferenceFrameH264 * rf, * rf2; + Picture * pic; + int i, list; + + render = (vdpau_render_state_t*)s->current_picture_ptr->data[2]; + assert(render != NULL); + assert(render->magic == MP_VDPAU_RENDER_MAGIC); + if ((render == NULL) || (render->magic != MP_VDPAU_RENDER_MAGIC)) + return -1; // make sure that this is render packet + + rf = &render->info.h264.referenceFrames[0]; +#define H264_RF_COUNT ARSIZE(render->info.h264.referenceFrames) + + for (list = 0; list < 2; ++list) { + Picture **lp = list ? h->long_ref : h->short_ref; + int ls = list ? 
h->long_ref_count : h->short_ref_count; + + for (i = 0; i < ls; ++i) { + pic = lp[i]; + if (!pic || !pic->reference) { + continue; + } + + render_ref = (vdpau_render_state_t*)pic->data[2]; + assert(render_ref != NULL); + if (render_ref == NULL) + return -1; // make sure that this is render packet + + rf2 = &render->info.h264.referenceFrames[0]; + while (rf2 != rf) { + if ( + (rf2->surface == render_ref->surface) + && (rf2->is_long_term == pic->long_ref) + && (rf2->frame_idx == pic->frame_num) + ) { + break; + } + ++rf2; + } + if (rf2 != rf) { + rf2->top_is_reference |= (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE; + rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE; + continue; + } + + if (rf >= &render->info.h264.referenceFrames[H264_RF_COUNT]) { + continue; + } + + rf->surface = render_ref->surface; + rf->is_long_term = pic->long_ref; + rf->top_is_reference = (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE; + rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? 
VDP_TRUE : VDP_FALSE; + rf->field_order_cnt[0] = pic->field_poc[0]; + rf->field_order_cnt[1] = pic->field_poc[1]; + rf->frame_idx = pic->frame_num; + + ++rf; + } + } + + for (; rf < &render->info.h264.referenceFrames[H264_RF_COUNT]; ++rf) { + rf->surface = VDP_INVALID_HANDLE; + rf->is_long_term = 0; + rf->top_is_reference = 0; + rf->bottom_is_reference = 0; + rf->field_order_cnt[0] = 0; + rf->field_order_cnt[1] = 0; + rf->frame_idx = 0; + } + + return 0; +} + +extern int VDPAU_h264_add_data_chunk(H264Context *h, const uint8_t *buf, int buf_size) +{ + MpegEncContext * s = &h->s; + vdpau_render_state_t * render; + + render = (vdpau_render_state_t*)s->current_picture_ptr->data[2]; + assert(render != NULL); + assert(render->magic == MP_VDPAU_RENDER_MAGIC); + if ((render == NULL) || (render->magic != MP_VDPAU_RENDER_MAGIC)) + return -1; // make sure that this is render packet + + if (!render->bitstreamBuffersUsed) { + VDPAU_h264_set_reference_frames(h); + } + + VDPAU_ensure_has_buffers(render, render->bitstreamBuffersUsed + 1); + + render->bitstreamBuffers[render->bitstreamBuffersUsed].struct_version = VDP_BITSTREAM_BUFFER_VERSION; + render->bitstreamBuffers[render->bitstreamBuffersUsed].bitstream = buf; + render->bitstreamBuffers[render->bitstreamBuffersUsed].bitstream_bytes = buf_size; + render->bitstreamBuffersUsed++; + + return 0; +} + +int VDPAU_h264_picture_complete(H264Context *h) +{ + MpegEncContext * s = &h->s; + vdpau_render_state_t * render; + + render = (vdpau_render_state_t*)s->current_picture_ptr->data[2]; + assert(render != NULL); + assert(render->magic == MP_VDPAU_RENDER_MAGIC); + if ((render == NULL) || (render->magic != MP_VDPAU_RENDER_MAGIC)) + return -1; // make sure that this is render packet + + render->info.h264.slice_count = h->slice_num; + if (render->info.h264.slice_count < 1) + return 0; + + int i; + for (i = 0; i < 2; ++i) { + int foc = s->current_picture_ptr->field_poc[i]; + if (foc == INT_MAX) { + foc = 0; + } + 
render->info.h264.field_order_cnt[i] = foc; + } + + render->info.h264.is_reference = s->current_picture_ptr->reference ? VDP_TRUE : VDP_FALSE; + render->info.h264.frame_num = h->frame_num; + render->info.h264.field_pic_flag = (s->picture_structure != PICT_FRAME) ? 1 : 0; + render->info.h264.bottom_field_flag = (s->picture_structure == PICT_BOTTOM_FIELD) ? 1 : 0; + render->info.h264.num_ref_frames = h->sps.ref_frame_count; + render->info.h264.mb_adaptive_frame_field_flag = h->sps.mb_aff; + render->info.h264.constrained_intra_pred_flag = h->pps.constrained_intra_pred; + render->info.h264.weighted_pred_flag = h->pps.weighted_pred; + render->info.h264.weighted_bipred_idc = h->pps.weighted_bipred_idc; + render->info.h264.frame_mbs_only_flag = h->sps.frame_mbs_only_flag; + render->info.h264.transform_8x8_mode_flag = h->pps.transform_8x8_mode; + render->info.h264.chroma_qp_index_offset = h->pps.chroma_qp_index_offset[0]; + render->info.h264.second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1]; + render->info.h264.pic_init_qp_minus26 = h->pps.init_qp - 26; + render->info.h264.num_ref_idx_l0_active_minus1 = h->pps.ref_count[0] - 1; + render->info.h264.num_ref_idx_l1_active_minus1 = h->pps.ref_count[1] - 1; + render->info.h264.log2_max_frame_num_minus4 = h->sps.log2_max_frame_num - 4; + render->info.h264.pic_order_cnt_type = h->sps.poc_type; + render->info.h264.log2_max_pic_order_cnt_lsb_minus4 = h->sps.log2_max_poc_lsb - 4; + render->info.h264.delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag; + render->info.h264.direct_8x8_inference_flag = h->sps.direct_8x8_inference_flag; + render->info.h264.entropy_coding_mode_flag = h->pps.cabac; + render->info.h264.pic_order_present_flag = h->pps.pic_order_present; + render->info.h264.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present; + render->info.h264.redundant_pic_cnt_present_flag = h->pps.redundant_pic_cnt_present; + memcpy(render->info.h264.scaling_lists_4x4, 
h->pps.scaling_matrix4, sizeof(render->info.h264.scaling_lists_4x4)); + memcpy(render->info.h264.scaling_lists_8x8, h->pps.scaling_matrix8, sizeof(render->info.h264.scaling_lists_8x8)); + + ff_draw_horiz_band(s, 0, s->avctx->height); + render->bitstreamBuffersUsed = 0; + + return 0; +} + +int VDPAU_vc1_decode_picture(MpegEncContext *s, AVCodecContext *avctx, VC1Context *v, const uint8_t *buf, int buf_size) +{ + // VC1Context *v = avctx->priv_data; + vdpau_render_state_t * render,* last, * next; + + render = (vdpau_render_state_t*)s->current_picture.data[2]; + assert(render != NULL); + assert(render->magic == MP_VDPAU_RENDER_MAGIC); + if ((render == NULL) || (render->magic != MP_VDPAU_RENDER_MAGIC)) { + return -1; // make sure that this is render packet + } + memset(&(render->info), 0 , sizeof(VdpPictureInfoVC1)); + + /* fill VdpPictureInfoVC1 struct */ + render->info.vc1.frame_coding_mode = v->fcm; + render->info.vc1.postprocflag = v->postprocflag; + render->info.vc1.pulldown = v->broadcast; + render->info.vc1.interlace = v->interlace; + render->info.vc1.tfcntrflag = v->tfcntrflag; + render->info.vc1.finterpflag = v->finterpflag; + render->info.vc1.psf = v->psf; + render->info.vc1.dquant = v->dquant; + render->info.vc1.panscan_flag = v->panscanflag; + render->info.vc1.refdist_flag = v->refdist_flag; + render->info.vc1.quantizer = v->quantizer_mode; + render->info.vc1.extended_mv = v->extended_mv; + render->info.vc1.extended_dmv = v->extended_dmv; + render->info.vc1.overlap = v->overlap; + render->info.vc1.vstransform = v->vstransform; + render->info.vc1.loopfilter = v->s.loop_filter; + render->info.vc1.fastuvmc = v->fastuvmc; + render->info.vc1.range_mapy_flag = v->range_mapy_flag; + render->info.vc1.range_mapy = v->range_mapy; + render->info.vc1.range_mapuv_flag = v->range_mapuv_flag; + render->info.vc1.range_mapuv = v->range_mapuv; + /* Specific to simple/main profile only */ + render->info.vc1.multires = v->multires; + render->info.vc1.syncmarker = 
v->s.resync_marker; + render->info.vc1.rangered = v->rangered; + render->info.vc1.maxbframes = v->s.max_b_frames; + /* Presently, making these as 0 */ + render->info.vc1.deblockEnable = 0; + render->info.vc1.pquant = 0; + + render->info.vc1.forward_reference = VDP_INVALID_HANDLE; + render->info.vc1.backward_reference = VDP_INVALID_HANDLE; + + switch(s->pict_type){ + case FF_I_TYPE: + render->info.vc1.picture_type = 0; + break; + case FF_B_TYPE: + if (v->bi_type) { + render->info.vc1.picture_type = 4; + } + else { + render->info.vc1.picture_type = 3; + } + break; + case FF_P_TYPE: + render->info.vc1.picture_type = 1; + break; + case FF_BI_TYPE: + render->info.vc1.picture_type = 4; + break; + default: + return -1; + } + + switch(s->pict_type){ + case FF_I_TYPE: + case FF_BI_TYPE: + break; + case FF_B_TYPE: + next = (vdpau_render_state_t*)s->next_picture.data[2]; + assert(next != NULL); + assert(next->magic == MP_VDPAU_RENDER_MAGIC); + if ((next == NULL) || (next->magic != MP_VDPAU_RENDER_MAGIC)) { + return -1; + } + render->info.vc1.backward_reference = next->surface; + // no break here, going to set forward prediction + case FF_P_TYPE: + last = (vdpau_render_state_t*)s->last_picture.data[2]; + assert(last->magic == MP_VDPAU_RENDER_MAGIC); + if (last->magic != MP_VDPAU_RENDER_MAGIC) { + return -1; + } + if (last == NULL) { // FIXME: Does this test make sense? + last = render; // predict second field from the first + } + render->info.vc1.forward_reference = last->surface; + break; + default: + return -1; + } + + VDPAU_ensure_has_buffers(render, 1); + + render->bitstreamBuffers[0].struct_version = VDP_BITSTREAM_BUFFER_VERSION; + render->bitstreamBuffers[0].bitstream_bytes = buf_size; + render->bitstreamBuffers[0].bitstream = buf; + render->bitstreamBuffersUsed = 1; + + // FIXME: I am not sure about how MPlayer calculates slice number. 
+ render->info.vc1.slice_count = 1; + + ff_draw_horiz_band(s, 0, s->avctx->height); + render->bitstreamBuffersUsed = 0; + + return 0; +} + diff -Naur --exclude=.svn mythtv.ori/libs/libavutil/avutil.h mythtv/libs/libavutil/avutil.h --- mythtv.ori/libs/libavutil/avutil.h 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libavutil/avutil.h 2009-02-10 14:01:34.000000000 +1100 @@ -106,6 +106,15 @@ PIX_FMT_YUV440P, ///< Planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) PIX_FMT_YUVJ440P, ///< Planar YUV 4:4:0 full scale (jpeg) PIX_FMT_YUVA420P, ///< Planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) + PIX_FMT_VDPAU_MPEG1, + PIX_FMT_VDPAU_MPEG2_SIMPLE, + PIX_FMT_VDPAU_MPEG2_MAIN, + PIX_FMT_VDPAU_H264_BASELINE, + PIX_FMT_VDPAU_H264_MAIN, + PIX_FMT_VDPAU_H264_HIGH, + PIX_FMT_VDPAU_VC1_SIMPLE, + PIX_FMT_VDPAU_VC1_MAIN, + PIX_FMT_VDPAU_VC1_ADVANCED, PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions }; diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/NuppelVideoPlayer.cpp mythtv/libs/libmythtv/NuppelVideoPlayer.cpp --- mythtv.ori/libs/libmythtv/NuppelVideoPlayer.cpp 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libmythtv/NuppelVideoPlayer.cpp 2009-02-10 14:01:34.000000000 +1100 @@ -5641,11 +5641,13 @@ case kCodec_MPEG1_IDCT: case kCodec_MPEG1_VLD: case kCodec_MPEG1_DVDV: + case kCodec_MPEG1_VDPAU: case kCodec_MPEG2: case kCodec_MPEG2_XVMC: case kCodec_MPEG2_IDCT: case kCodec_MPEG2_VLD: case kCodec_MPEG2_DVDV: + case kCodec_MPEG2_VDPAU: return "MPEG-2"; case kCodec_H263: @@ -5653,6 +5655,7 @@ case kCodec_H263_IDCT: case kCodec_H263_VLD: case kCodec_H263_DVDV: + case kCodec_H263_VDPAU: return "H.263"; case kCodec_NUV_MPEG4: @@ -5661,6 +5664,7 @@ case kCodec_MPEG4_XVMC: case kCodec_MPEG4_VLD: case kCodec_MPEG4_DVDV: + case kCodec_MPEG4_VDPAU: return "MPEG-4"; case kCodec_H264: @@ -5668,6 +5672,7 @@ case kCodec_H264_IDCT: case kCodec_H264_VLD: 
case kCodec_H264_DVDV: + case kCodec_H264_VDPAU: return "H.264"; case kCodec_NONE: diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/avformatdecoder.cpp mythtv/libs/libmythtv/avformatdecoder.cpp --- mythtv.ori/libs/libmythtv/avformatdecoder.cpp 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libmythtv/avformatdecoder.cpp 2009-02-10 14:01:34.000000000 +1100 @@ -37,13 +37,22 @@ } #endif // USING_XVMC +#ifdef USING_VDPAU +#include "videoout_xv.h" +extern "C" { +#include "libavcodec/vdpau_render.h" +} +#endif // USING_VDPAU + extern "C" { #include "../libavutil/avutil.h" #include "../libavcodec/ac3_parser.h" +#include "../libavcodec/mpegvideo.h" +#include "../libavformat/avio.h" #include "../libmythmpeg2/mpeg2.h" #include "ivtv_myth.h" -// from libavcodec -extern const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state); +//// from libavcodec +//extern const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state); } #define LOC QString("AFD: ") @@ -73,6 +82,11 @@ int offset[4], int y, int type, int height); void decode_cc_dvd(struct AVCodecContext *c, const uint8_t *buf, int buf_size); +int get_avf_buffer_vdpau(struct AVCodecContext *c, AVFrame *pic); +void release_avf_buffer_vdpau(struct AVCodecContext *c, AVFrame *pic); +void render_slice_vdpau(struct AVCodecContext *s, const AVFrame *src, + int offset[4], int y, int type, int height); + static void myth_av_log(void *ptr, int level, const char* fmt, va_list vl) { static QString full_line(""); @@ -1117,6 +1131,17 @@ enc->draw_horiz_band = NULL; directrendering |= selectedStream; } + else if (codec && (codec->id == CODEC_ID_MPEGVIDEO_VDPAU || + codec->id == CODEC_ID_H264_VDPAU || + codec->id == CODEC_ID_VC1_VDPAU || + codec->id == CODEC_ID_WMV3_VDPAU)) + { + enc->get_buffer = get_avf_buffer_vdpau; + enc->release_buffer = release_avf_buffer_vdpau; + enc->draw_horiz_band = render_slice_vdpau; + enc->slice_flags = 
SLICE_FLAG_CODED_ORDER | SLICE_FLAG_ALLOW_FIELD; + directrendering |= selectedStream; + } else if (codec && codec->capabilities & CODEC_CAP_DR1) { enc->flags |= CODEC_FLAG_EMU_EDGE; @@ -1148,7 +1173,7 @@ } } -#if defined(USING_XVMC) || defined(USING_DVDV) +#if defined(USING_XVMC) || defined(USING_DVDV) || defined(USING_VDPAU) static int mpeg_version(int codec_id) { switch (codec_id) @@ -1159,23 +1184,36 @@ case CODEC_ID_MPEG2VIDEO_XVMC: case CODEC_ID_MPEG2VIDEO_XVMC_VLD: case CODEC_ID_MPEG2VIDEO_DVDV: + case CODEC_ID_MPEGVIDEO_VDPAU: return 2; case CODEC_ID_H263: return 3; case CODEC_ID_MPEG4: return 4; case CODEC_ID_H264: + case CODEC_ID_H264_VDPAU: return 5; + case CODEC_ID_VC1: + case CODEC_ID_VC1_VDPAU: + return 6; + case CODEC_ID_WMV3: + case CODEC_ID_WMV3_VDPAU: + return 7; + default: + break; } return 0; } -#endif // defined(USING_XVMC) || defined(USING_DVDV) -#ifdef USING_XVMC static int xvmc_pixel_format(enum PixelFormat pix_fmt) { (void) pix_fmt; +#ifdef USING_XVMC int xvmc_chroma = XVMC_CHROMA_FORMAT_420; +#else + int xvmc_chroma = 0; +#endif + #if 0 // We don't support other chromas yet if (PIX_FMT_YUV420P == pix_fmt) @@ -1187,7 +1225,7 @@ #endif return xvmc_chroma; } -#endif // USING_XVMC +#endif void default_captions(sinfo_vec_t *tracks, int av_index) { @@ -1377,7 +1415,7 @@ */ void AvFormatDecoder::ScanDSMCCStreams(void) { - if (!ic->cur_pmt_sect) + if (!ic || !ic->cur_pmt_sect) return; if (!itv && ! (itv = GetNVP()->GetInteractiveTV())) @@ -1490,21 +1528,11 @@ uint width = max(enc->width, 16); uint height = max(enc->height, 16); - VideoDisplayProfile vdp; - vdp.SetInput(QSize(width, height)); - QString dec = vdp.GetDecoder(); - uint thread_count = vdp.GetMaxCPUs(); - VERBOSE(VB_PLAYBACK, QString("Using %1 CPUs for decoding") - .arg(ENABLE_THREADS ? 
thread_count : 1)); - - if (ENABLE_THREADS && thread_count > 1) - { - avcodec_thread_init(enc, thread_count); - enc->thread_count = thread_count; - } + QString dec = "ffmpeg"; + uint thread_count = 1; bool handled = false; -#ifdef USING_XVMC +#if defined(USING_VDPAU) || defined(USING_XVMC) if (!using_null_videoout && mpeg_version(enc->codec_id)) { // HACK -- begin @@ -1538,9 +1566,9 @@ /* xvmc pix fmt */ xvmc_pixel_format(enc->pix_fmt), /* test surface */ kCodec_NORMAL_END > video_codec_id, /* force_xv */ force_xv); - bool vcd, idct, mc; + bool vcd, idct, mc, vdpau; enc->codec_id = (CodecID) - myth2av_codecid(mcid, vcd, idct, mc); + myth2av_codecid(mcid, vcd, idct, mc, vdpau); if (ringBuffer && ringBuffer->isDVD() && (mcid == video_codec_id) && @@ -1576,6 +1604,23 @@ } #endif // USING_XVMC || USING_DVDV + VideoDisplayProfile vdp; + vdp.SetInput(QSize(width, height)); + dec = vdp.GetDecoder(); + thread_count = vdp.GetMaxCPUs(); + + if (video_codec_id > kCodec_NORMAL_END) + thread_count = 1; + + VERBOSE(VB_PLAYBACK, QString("Using %1 CPUs for decoding") + .arg(ENABLE_THREADS ? 
thread_count : 1)); + + if (ENABLE_THREADS && thread_count > 1) + { + avcodec_thread_init(enc, thread_count); + enc->thread_count = thread_count; + } + if (!handled) { if (CODEC_ID_H264 == enc->codec_id) @@ -2102,6 +2147,76 @@ } } +int get_avf_buffer_vdpau(struct AVCodecContext *c, AVFrame *pic) +{ + AvFormatDecoder *nd = (AvFormatDecoder *)(c->opaque); + VideoFrame *frame = nd->GetNVP()->GetNextVideoFrame(false); + + pic->data[0] = frame->priv[0]; + pic->data[1] = frame->priv[1]; + pic->data[2] = frame->buf; + + pic->linesize[0] = 0; + pic->linesize[1] = 0; + pic->linesize[2] = 0; + + pic->opaque = frame; + pic->type = FF_BUFFER_TYPE_USER; + + pic->age = 256 * 256 * 256 * 64; + + frame->pix_fmt = c->pix_fmt; + +#ifdef USING_VDPAU + vdpau_render_state_t *render = (vdpau_render_state_t *)frame->buf; + render->state |= MP_VDPAU_STATE_USED_FOR_REFERENCE; +#endif + + return 0; +} + +void release_avf_buffer_vdpau(struct AVCodecContext *c, AVFrame *pic) +{ + assert(pic->type == FF_BUFFER_TYPE_USER); + +#ifdef USING_VDPAU + vdpau_render_state_t *render = (vdpau_render_state_t *)pic->data[2]; + render->state &= ~MP_VDPAU_STATE_USED_FOR_REFERENCE; +#endif + + AvFormatDecoder *nd = (AvFormatDecoder *)(c->opaque); + if (nd && nd->GetNVP() && nd->GetNVP()->getVideoOutput()) + nd->GetNVP()->getVideoOutput()->DeLimboFrame((VideoFrame*)pic->opaque); + + for (uint i = 0; i < 4; i++) + pic->data[i] = NULL; +} + +void render_slice_vdpau(struct AVCodecContext *s, const AVFrame *src, + int offset[4], int y, int type, int height) +{ + if (!src) + return; + + (void)offset; + (void)type; + + if (s && src && s->opaque && src->opaque) + { + AvFormatDecoder *nd = (AvFormatDecoder *)(s->opaque); + + int width = s->width; + + VideoFrame *frame = (VideoFrame *)src->opaque; + nd->GetNVP()->DrawSlice(frame, 0, y, width, height); + } + else + { + VERBOSE(VB_IMPORTANT, LOC + + "render_slice_vdpau called with bad avctx or src"); + } +} + void decode_cc_dvd(struct AVCodecContext *s, const uint8_t 
*buf, int buf_size) { // taken from xine-lib libspucc by Christian Vogler @@ -3247,12 +3362,14 @@ if (context->codec_id == CODEC_ID_MPEG1VIDEO || context->codec_id == CODEC_ID_MPEG2VIDEO || context->codec_id == CODEC_ID_MPEG2VIDEO_XVMC || - context->codec_id == CODEC_ID_MPEG2VIDEO_XVMC_VLD) + context->codec_id == CODEC_ID_MPEG2VIDEO_XVMC_VLD || + context->codec_id == CODEC_ID_MPEGVIDEO_VDPAU) { if (!ringBuffer->isDVD()) MpegPreProcessPkt(curstream, pkt); } - else if (context->codec_id == CODEC_ID_H264) + else if (context->codec_id == CODEC_ID_H264 || + context->codec_id == CODEC_ID_H264_VDPAU) { H264PreProcessPkt(curstream, pkt); } @@ -3926,6 +4043,10 @@ (video_codec_id < kCodec_VLD_END)) return "xvmc-vld"; + if ((video_codec_id > kCodec_DVDV_END) && + (video_codec_id < kCodec_VDPAU_END)) + return "vdpau"; + return "ffmpeg"; } diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/avformatdecoder.h mythtv/libs/libmythtv/avformatdecoder.h --- mythtv.ori/libs/libmythtv/avformatdecoder.h 2009-01-22 12:22:03.000000000 +1100 +++ mythtv/libs/libmythtv/avformatdecoder.h 2009-02-10 14:01:34.000000000 +1100 @@ -21,10 +21,12 @@ #define CODEC_IS_MPEG(c) (c == CODEC_ID_MPEG1VIDEO || \ c == CODEC_ID_MPEG2VIDEO || \ c == CODEC_ID_MPEG2VIDEO_DVDV || \ + c == CODEC_ID_MPEGVIDEO_VDPAU || \ c == CODEC_ID_MPEG2VIDEO_XVMC || \ c == CODEC_ID_MPEG2VIDEO_XVMC_VLD) #define CODEC_IS_HW_ACCEL(c) (c == CODEC_ID_MPEG2VIDEO_DVDV || \ + c == CODEC_ID_MPEGVIDEO_VDPAU || \ c == CODEC_ID_MPEG2VIDEO_XVMC || \ c == CODEC_ID_MPEG2VIDEO_XVMC_VLD) diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/dtvrecorder.cpp mythtv/libs/libmythtv/dtvrecorder.cpp --- mythtv.ori/libs/libmythtv/dtvrecorder.cpp 2009-01-22 12:22:02.000000000 +1100 +++ mythtv/libs/libmythtv/dtvrecorder.cpp 2009-02-10 14:01:34.000000000 +1100 @@ -13,8 +13,7 @@ #include "tv_rec.h" extern "C" { -// from libavcodec -extern const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state); +#include 
"../libavcodec/mpegvideo.h" } #define LOC QString("DTVRec(%1): ").arg(tvrec->GetCaptureCardNum()) diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/filtermanager.cpp mythtv/libs/libmythtv/filtermanager.cpp --- mythtv.ori/libs/libmythtv/filtermanager.cpp 2009-01-23 16:38:27.000000000 +1100 +++ mythtv/libs/libmythtv/filtermanager.cpp 2009-02-10 14:00:22.000000000 +1100 @@ -174,6 +174,10 @@ { QString FiltName = (*i).section('=', 0, 0); QString FiltOpts = (*i).section('=', 1); + + if (FiltName.contains("opengl")) + continue; + FI = GetFilterInfoByName(FiltName); if (FI) diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/frame.h mythtv/libs/libmythtv/frame.h --- mythtv.ori/libs/libmythtv/frame.h 2009-01-23 16:38:27.000000000 +1100 +++ mythtv/libs/libmythtv/frame.h 2009-02-10 14:01:34.000000000 +1100 @@ -21,7 +21,8 @@ FMT_ARGB32, FMT_RGBA32, FMT_YUV422P, - FMT_ALPHA, + FMT_BGRA, + FMT_VDPAU } VideoFrameType; typedef struct VideoFrame_ @@ -49,6 +50,8 @@ int pitches[3]; // Y, U, & V pitches int offsets[3]; // Y, U, & V offsets + + int pix_fmt; } VideoFrame; #ifdef __cplusplus @@ -88,8 +91,8 @@ vf->top_field_first = 1; vf->repeat_pict = 0; vf->forcekey = 0; + vf->pix_fmt = 0; - // MS Windows doesn't like bzero().. memset(vf->priv, 0, 4 * sizeof(unsigned char *)); if (p) @@ -145,9 +148,10 @@ { int uv_height = vf->height >> 1; // MS Windows doesn't like bzero().. - memset(vf->buf + vf->offsets[0], 0, vf->pitches[0] * vf->height); - memset(vf->buf + vf->offsets[1], 127, vf->pitches[1] * uv_height); - memset(vf->buf + vf->offsets[2], 127, vf->pitches[2] * uv_height); + // JYA: Patch to prevent some crashes with VDPAU, don't fix cause of problems + memset(vf->buf + vf->offsets[0], 0, vf->pitches[0] * vf->height > 0 ? vf->pitches[0] * vf->height : 0); + memset(vf->buf + vf->offsets[1], 127, vf->pitches[1] * uv_height > 0 ? vf->pitches[1] * uv_height : 0); + memset(vf->buf + vf->offsets[2], 127, vf->pitches[2] * uv_height > 0 ? 
vf->pitches[2] * uv_height : 0); } } diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/libmythtv.pro mythtv/libs/libmythtv/libmythtv.pro --- mythtv.ori/libs/libmythtv/libmythtv.pro 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libmythtv/libmythtv.pro 2009-02-10 14:01:34.000000000 +1100 @@ -7,6 +7,17 @@ target.path = $${LIBDIR} INSTALLS = target +POSTINC = + +contains(INCLUDEPATH, /usr/include) { + POSTINC += /usr/include + INCLUDEPATH -= /usr/include +} +contains(INCLUDEPATH, /usr/local/include) { + POSTINC += /usr/local/include + INCLUDEPATH -= /usr/local/include +} + INCLUDEPATH += ../.. .. . INCLUDEPATH += ../libmyth ../libavcodec ../libavutil ../libmythmpeg2 INCLUDEPATH += ./dvbdev ./mpeg ./iptv @@ -14,6 +25,8 @@ INCLUDEPATH += ../libmythlivemedia/groupsock/include INCLUDEPATH += ../libmythlivemedia/liveMedia/include INCLUDEPATH += ../libmythlivemedia/UsageEnvironment/include +INCLUDEPATH += ../libmythui +INCLUDEPATH += $$POSTINC DEPENDPATH += ../libmyth ../libavcodec ../libavformat ../libavutil DEPENDPATH += ../libmythmpeg2 ../libmythdvdnav @@ -301,6 +314,13 @@ using_xvmc_vld:DEFINES += USING_XVMC_VLD using_xvmc_pbuffer:DEFINES += USING_XVMC_PBUFFER + using_vdpau { + DEFINES += USING_VDPAU + HEADERS += util-vdpau.h + SOURCES += util-vdpau.cpp + LIBS += -lvdpau + } + using_opengl { CONFIG += opengl DEFINES += USING_OPENGL diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/mythcodecid.cpp mythtv/libs/libmythtv/mythcodecid.cpp --- mythtv.ori/libs/libmythtv/mythcodecid.cpp 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libmythtv/mythcodecid.cpp 2009-02-10 14:01:34.000000000 +1100 @@ -28,6 +28,10 @@ return "MPEG4"; case kCodec_H264: return "H.264"; + case kCodec_VC1: + return "VC-1"; + case kCodec_WMV3: + return "WMV3"; case kCodec_MPEG1_XVMC: return "MPEG1 XvMC"; @@ -73,6 +77,21 @@ case kCodec_H264_DVDV: return "H.264 DVDV"; + case kCodec_MPEG1_VDPAU: + return "MPEG1 VDPAU"; + case kCodec_MPEG2_VDPAU: + return "MPEG2 VDPAU"; + case kCodec_H263_VDPAU: 
+ return "H.263 VDPAU"; + case kCodec_MPEG4_VDPAU: + return "MPEG4 VDPAU"; + case kCodec_H264_VDPAU: + return "H.264 VDPAU"; + case kCodec_VC1_VDPAU: + return "VC1 VDPAU"; + case kCodec_WMV3_VDPAU: + return "WMV3 VDPAU"; + default: break; } @@ -81,9 +100,9 @@ } int myth2av_codecid(MythCodecID codec_id, - bool &vld, bool &idct, bool &mc) + bool &vld, bool &idct, bool &mc, bool &vdpau) { - vld = idct = mc = false; + vld = idct = mc = vdpau = false; CodecID ret = CODEC_ID_NONE; switch (codec_id) { @@ -109,6 +128,13 @@ ret = CODEC_ID_H264; break; + case kCodec_VC1: + ret = CODEC_ID_VC1; + break; + case kCodec_WMV3: + ret = CODEC_ID_WMV3; + break; + case kCodec_MPEG1_XVMC: case kCodec_MPEG2_XVMC: mc = true; @@ -168,12 +194,37 @@ VERBOSE(VB_IMPORTANT, "Error: DVDV H.263 not supported by ffmpeg"); break; case kCodec_MPEG4_DVDV: - VERBOSE(VB_IMPORTANT, "Error: DVDV MPEG not supported by ffmpeg"); + VERBOSE(VB_IMPORTANT, "Error: DVDV MPEG4 not supported by ffmpeg"); break; case kCodec_H264_DVDV: VERBOSE(VB_IMPORTANT, "Error: DVDV H.265 not supported by ffmpeg"); break; + case kCodec_MPEG1_VDPAU: + case kCodec_MPEG2_VDPAU: + ret = CODEC_ID_MPEGVIDEO_VDPAU; + vdpau = true; + break; + case kCodec_H263_VDPAU: + VERBOSE(VB_IMPORTANT, "Error: VDPAU H.263 not supported by ffmpeg"); + break; + case kCodec_MPEG4_VDPAU: + VERBOSE(VB_IMPORTANT, "Error: VDPAU MPEG4 not supported by ffmpeg"); + break; + + case kCodec_H264_VDPAU: + ret = CODEC_ID_H264_VDPAU; + vdpau = true; + break; + case kCodec_VC1_VDPAU: + ret = CODEC_ID_VC1_VDPAU; + vdpau = true; + break; + case kCodec_WMV3_VDPAU: + ret = CODEC_ID_WMV3_VDPAU; + vdpau = true; + break; + default: VERBOSE(VB_IMPORTANT, QString("Error: MythCodecID %1 has not been " diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/mythcodecid.h mythtv/libs/libmythtv/mythcodecid.h --- mythtv.ori/libs/libmythtv/mythcodecid.h 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libmythtv/mythcodecid.h 2009-02-10 14:01:34.000000000 +1100 @@ -9,6 +9,8 @@ // 
myth2av_codecid, and NuppelVideoPlayer::GetEncodingType() kCodec_NONE = 0, + kCodec_NORMAL_BEGIN = kCodec_NONE, + kCodec_NUV_RTjpeg, kCodec_NUV_MPEG4, @@ -17,9 +19,13 @@ kCodec_H263, kCodec_MPEG4, kCodec_H264, + kCodec_VC1, + kCodec_WMV3, kCodec_NORMAL_END, + kCodec_STD_XVMC_BEGIN = kCodec_NORMAL_END, + kCodec_MPEG1_XVMC, kCodec_MPEG2_XVMC, kCodec_H263_XVMC, @@ -34,6 +40,8 @@ kCodec_STD_XVMC_END, + kCodec_VLD_BEGIN = kCodec_STD_XVMC_END, + kCodec_MPEG1_VLD, kCodec_MPEG2_VLD, kCodec_H263_VLD, @@ -42,22 +50,37 @@ kCodec_VLD_END, + kCodec_DVDV_BEGIN = kCodec_VLD_END, + kCodec_MPEG1_DVDV, kCodec_MPEG2_DVDV, kCodec_H263_DVDV, kCodec_MPEG4_DVDV, kCodec_H264_DVDV, - kCodec_DVDV_END + kCodec_DVDV_END, + + kCodec_VDPAU_BEGIN = kCodec_DVDV_END, + + kCodec_MPEG1_VDPAU, + kCodec_MPEG2_VDPAU, + kCodec_H263_VDPAU, + kCodec_MPEG4_VDPAU, + kCodec_H264_VDPAU, + kCodec_VC1_VDPAU, + kCodec_WMV3_VDPAU, + + kCodec_VDPAU_END, } MythCodecID; QString toString(MythCodecID codecid); -int myth2av_codecid(MythCodecID codec_id, bool &vld, bool &idct, bool &mc); +int myth2av_codecid(MythCodecID codec_id, bool &vld, bool &idct, bool &mc, + bool &vdpau); inline int myth2av_codecid(MythCodecID codec_id) { - bool vld, idct, mc; - return myth2av_codecid(codec_id, vld, idct, mc); + bool vld, idct, mc, vdpau; + return myth2av_codecid(codec_id, vld, idct, mc, vdpau); } #endif // _MYTH_CODEC_ID_H_ diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/openglcontext.cpp mythtv/libs/libmythtv/openglcontext.cpp --- mythtv.ori/libs/libmythtv/openglcontext.cpp 2009-01-23 16:38:27.000000000 +1100 +++ mythtv/libs/libmythtv/openglcontext.cpp 2009-02-10 14:00:22.000000000 +1100 @@ -4,16 +4,57 @@ #include "util-opengl.h" + #define LOC QString("GLCtx: ") #define LOC_ERR QString("GLCtx, Error: ") +OpenGLContextLocker::OpenGLContextLocker(OpenGLContext *ctx) + : m_ctx(ctx) +{ + if (m_ctx) + m_ctx->MakeCurrent(true); +} +OpenGLContextLocker::~OpenGLContextLocker() +{ + if (m_ctx) + m_ctx->MakeCurrent(false); +} + +class 
MythGLTexture +{ + public: + MythGLTexture() : + m_type(GL_TEXTURE_2D), m_data(NULL), m_data_size(0), + m_data_type(GL_UNSIGNED_BYTE), m_data_fmt(GL_BGRA), + m_internal_fmt(GL_RGBA8), m_pbo(0), + m_filter(GL_LINEAR), m_wrap(GL_CLAMP_TO_EDGE), + m_size(0,0), m_vid_size(0,0) + { + } + + ~MythGLTexture() + { + } + + GLuint m_type; + unsigned char *m_data; + uint m_data_size; + GLuint m_data_type; + GLuint m_data_fmt; + GLuint m_internal_fmt; + GLuint m_pbo; + GLuint m_filter; + GLuint m_wrap; + QSize m_size; + QSize m_vid_size; +}; + class PrivateContext { public: PrivateContext() : m_glx_fbconfig(0), m_gl_window(0), m_glx_window(0), - m_glx_context(NULL), - m_texture_type(GL_TEXTURE_2D), m_textures_enabled(false), + m_glx_context(NULL), m_texture_type(0), m_vis_info(NULL), m_attr_list(NULL) { } @@ -27,21 +68,24 @@ GLXWindow m_glx_window; GLXContext m_glx_context; int m_texture_type; - bool m_textures_enabled; XVisualInfo *m_vis_info; int const *m_attr_list; - vector m_textures; + map m_textures; vector m_programs; vector m_framebuffers; + GLuint m_fence; }; -OpenGLContext::OpenGLContext() : +OpenGLContext::OpenGLContext(QMutex *lock) : m_priv(new PrivateContext()), m_display(NULL), m_screen_num(0), m_major_ver(1), m_minor_ver(2), m_extensions(QString::null), m_ext_supported(0), - m_visible(true), m_max_tex_size(0) + m_ext_used(0), + m_max_tex_size(0), m_viewport(0,0), + m_lock(lock), m_lock_level(0), + m_colour_control(false) { if (!init_opengl()) VERBOSE(VB_PLAYBACK, LOC_ERR + "Failed to initialize OpenGL support."); @@ -56,11 +100,17 @@ DeletePrograms(); DeleteTextures(); DeleteFrameBuffers(); - } - glFlush(); + Flush(true); - MakeCurrent(false); + if (m_priv->m_fence && + (m_ext_supported & kGLNVFence)) + { + gMythGLDeleteFencesNV(1, &(m_priv->m_fence)); + } + } + + Flush(false); if (m_priv->m_glx_window) { @@ -74,6 +124,8 @@ m_priv->m_gl_window = 0; } + MakeCurrent(false); + if (m_priv->m_glx_context) { X11S(glXDestroyContext(m_display, m_priv->m_glx_context)); 
@@ -89,22 +141,26 @@ void OpenGLContext::Hide(void) { + MakeCurrent(true); X11S(XUnmapWindow(m_display, m_priv->m_gl_window)); + MakeCurrent(false); } void OpenGLContext::Show(void) { + MakeCurrent(true); X11S(XMapWindow(m_display, m_priv->m_gl_window)); + MakeCurrent(false); } // locking ok bool OpenGLContext::Create( Display *XJ_disp, Window XJ_curwin, uint screen_num, - const QSize &display_visible_size, bool visible) + const QRect &display_visible, bool colour_control) { static bool debugged = false; - m_visible = visible; + m_colour_control = colour_control; m_display = XJ_disp; m_screen_num = screen_num; uint major, minor; @@ -180,7 +236,7 @@ } m_priv->m_gl_window = get_gl_window( - XJ_disp, XJ_curwin, m_priv->m_vis_info, display_visible_size, visible); + XJ_disp, XJ_curwin, m_priv->m_vis_info, display_visible); if (!m_priv->m_gl_window) { @@ -202,8 +258,7 @@ } } - VERBOSE(VB_PLAYBACK, LOC + QString("Created window%1 and context.") - .arg(m_visible ? "" : " (Offscreen)")); + VERBOSE(VB_PLAYBACK, LOC + QString("Created window and context.")); { MakeCurrent(true); @@ -237,15 +292,29 @@ MakeCurrent(false); } - int tex_type = get_gl_texture_rect_type(m_extensions); - m_priv->m_texture_type = (tex_type) ? tex_type : m_priv->m_texture_type; - m_ext_supported = - ((tex_type) ? kGLExtRect : 0) | + ((get_gl_texture_rect_type(m_extensions)) ? kGLExtRect : 0) | ((has_gl_fragment_program_support(m_extensions)) ? kGLExtFragProg : 0) | + ((has_gl_pixelbuffer_object_support(m_extensions)) ? + kGLExtPBufObj : 0) | ((has_gl_fbuffer_object_support(m_extensions)) ? kGLExtFBufObj : 0) | - ((minor >= 3) ? kGLXPBuffer : 0); + ((has_gl_nvfence_support(m_extensions)) ? kGLNVFence : 0) | + ((minor >= 3) ? 
kGLXPBuffer : 0) | kGLFinish; + + m_ext_used = m_ext_supported; + + MakeCurrent(true); + + if (m_ext_used & kGLNVFence) + { + gMythGLGenFencesNV(1, &(m_priv->m_fence)); + if (m_priv->m_fence) + VERBOSE(VB_PLAYBACK, LOC + "Using GL_NV_fence"); + } + + Init2DState(); + MakeCurrent(false); return true; } @@ -253,30 +322,52 @@ // locking ok bool OpenGLContext::MakeCurrent(bool current) { - bool ok; + bool ok = true; if (current) { - if (IsGLXSupported(1,3)) - { - X11S(ok = glXMakeCurrent(m_display, - m_priv->m_glx_window, - m_priv->m_glx_context)); - } - else + m_lock->lock(); + if (m_lock_level == 0) { - X11S(ok = glXMakeCurrent(m_display, - m_priv->m_gl_window, - m_priv->m_glx_context)); + if (IsGLXSupported(1,3)) + { + X11S(ok = glXMakeContextCurrent(m_display, + m_priv->m_glx_window, + m_priv->m_glx_window, + m_priv->m_glx_context)); + } + else + { + X11S(ok = glXMakeCurrent(m_display, + m_priv->m_gl_window, + m_priv->m_glx_context)); + } } + m_lock_level++; } else { - X11S(ok = glXMakeCurrent(m_display, None, NULL)); + m_lock_level--; + if (m_lock_level == 0) + { + if (IsGLXSupported(1,3)) + { + X11S(ok = glXMakeContextCurrent(m_display, None, None, NULL)); + } + else + { + X11S(ok = glXMakeCurrent(m_display, None, NULL)); + } + } + else if (m_lock_level < 0) + { + VERBOSE(VB_PLAYBACK, LOC_ERR + "Mis-matched calls to MakeCurrent"); + } + m_lock->unlock(); } if (!ok) - VERBOSE(VB_PLAYBACK, LOC + "Could not make context current."); + VERBOSE(VB_PLAYBACK, LOC_ERR + "Could not make context current."); return ok; } @@ -284,48 +375,205 @@ // locking ok void OpenGLContext::SwapBuffers(void) { - if (m_visible) - { - MakeCurrent(true); + MakeCurrent(true); + if (m_ext_used & kGLFinish) glFinish(); - if (IsGLXSupported(1,3)) - X11S(glXSwapBuffers(m_display, m_priv->m_glx_window)); - else - X11S(glXSwapBuffers(m_display, m_priv->m_gl_window)); - MakeCurrent(false); - } + if (IsGLXSupported(1,3)) + X11S(glXSwapBuffers(m_display, m_priv->m_glx_window)); + else + 
X11S(glXSwapBuffers(m_display, m_priv->m_gl_window)); + + MakeCurrent(false); } // locking ok -void OpenGLContext::Flush(void) +void OpenGLContext::Flush(bool use_fence) { - glFlush(); + MakeCurrent(true); + + if ((m_ext_used & kGLNVFence) && + m_priv->m_fence && use_fence) + { + gMythGLSetFenceNV(m_priv->m_fence, GL_ALL_COMPLETED_NV); + gMythGLFinishFenceNV(m_priv->m_fence); + } + else + { + glFlush(); + } + + MakeCurrent(false); } // locking ok -void OpenGLContext::EnableTextures(void) +void OpenGLContext::EnableTextures(uint tex, uint tex_type) { - if (!m_priv->m_textures_enabled) + MakeCurrent(true); + + int type = tex ? m_priv->m_textures[tex].m_type : tex_type; + + if (type != m_priv->m_texture_type) { - m_priv->m_textures_enabled = true; + if (m_priv->m_texture_type) + { + glDisable(m_priv->m_texture_type); + } + glEnable(type); + m_priv->m_texture_type = type; + } - MakeCurrent(true); - glEnable(GetTextureType()); - MakeCurrent(false); + MakeCurrent(false); +} + +void OpenGLContext::DisableTextures(void) +{ + MakeCurrent(true); + + glDisable(m_priv->m_texture_type); + m_priv->m_texture_type = 0; + + MakeCurrent(false); +} + +void OpenGLContext::UpdateTexture(uint tex, + const unsigned char *buf, + const int *offsets, + const int *pitches, + VideoFrameType fmt, + bool interlaced, + const unsigned char* alpha) +{ + MakeCurrent(true); + + MythGLTexture *tmp_tex = &m_priv->m_textures[tex]; + QSize size = tmp_tex->m_vid_size; + + EnableTextures(tex); + glBindTexture(tmp_tex->m_type, tex); + + if (tmp_tex->m_pbo) + { + void *pboMemory; + + gMythGLBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, tmp_tex->m_pbo); + gMythGLBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB, + tmp_tex->m_data_size, NULL, GL_STREAM_DRAW); + + pboMemory = gMythGLMapBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, + GL_WRITE_ONLY); + + if (FMT_BGRA == fmt) + { + memcpy(pboMemory, buf, tmp_tex->m_data_size); + } + else if (interlaced) + { + pack_yv12interlaced(buf, (unsigned char *)pboMemory, + offsets, pitches, 
size); + } + else + { + pack_yv12alpha(buf, (unsigned char *)pboMemory, + offsets, pitches, size, alpha); + } + + gMythGLUnmapBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB); + + glTexSubImage2D(tmp_tex->m_type, 0, 0, 0, size.width(), size.height(), + tmp_tex->m_data_fmt, tmp_tex->m_data_type, 0); + + gMythGLBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0); } + else + { + if (!tmp_tex->m_data) + { + unsigned char *scratch = new unsigned char[tmp_tex->m_data_size]; + if (scratch) + { + bzero(scratch, tmp_tex->m_data_size); + tmp_tex->m_data = scratch; + } + } + + if (tmp_tex->m_data) + { + const unsigned char *tmp = tmp_tex->m_data; + + if (FMT_BGRA == fmt) + { + tmp = buf; + } + else if (interlaced) + { + pack_yv12interlaced(buf, tmp, + offsets, pitches, size); + } + else + { + pack_yv12alpha(buf, tmp, offsets, + pitches, size, alpha); + } + + glTexSubImage2D(tmp_tex->m_type, 0, 0, 0, + size.width(), size.height(), + tmp_tex->m_data_fmt, tmp_tex->m_data_type, + tmp); + } + } + + MakeCurrent(false); } // locking ok -uint OpenGLContext::CreateTexture(void) -{ +uint OpenGLContext::CreateTexture(QSize tot_size, QSize vid_size, + bool use_pbo, + uint type, uint data_type, + uint data_fmt, uint internal_fmt, + uint filter, uint wrap) +{ + if ((uint)tot_size.width() > m_max_tex_size || + (uint)tot_size.height() > m_max_tex_size) + return 0; + MakeCurrent(true); + EnableTextures(0, type); + GLuint tex; glGenTextures(1, &tex); - SetupTextureFilters(tex, GL_LINEAR); - m_priv->m_textures.push_back(tex); + glBindTexture(type, tex); + + if (tex) + { + MythGLTexture *texture = new MythGLTexture(); + texture->m_type = type; + texture->m_data_type = data_type; + texture->m_data_fmt = data_fmt; + texture->m_internal_fmt = internal_fmt; + texture->m_size = tot_size; + texture->m_vid_size = vid_size; + texture->m_data_size = GetBufferSize(vid_size, data_fmt, data_type); + m_priv->m_textures[tex] = *texture; + + if (ClearTexture(tex) && m_priv->m_textures[tex].m_data_size) + { + 
SetTextureFilters(tex, filter, wrap); + if (use_pbo) + m_priv->m_textures[tex].m_pbo = CreatePBO(tex); + } + else + { + DeleteTexture(tex); + tex = 0; + } + + delete texture; + } + + Flush(true); MakeCurrent(false); @@ -333,83 +581,177 @@ } // locking ok -bool OpenGLContext::SetupTexture(const QSize &size, uint tex, int filt) +uint OpenGLContext::GetBufferSize(QSize size, uint fmt, uint type) { - unsigned char *scratch = - new unsigned char[(size.width() * size.height() * 4) + 128]; + uint bytes; + uint bpp; - bzero(scratch, size.width() * size.height() * 4); + switch (fmt) + { + case GL_BGRA: + case GL_RGBA: + bpp = 4; + break; + default: + bpp =0; + } - GLint check; + switch (type) + { + case GL_UNSIGNED_BYTE: + bytes = sizeof(GLubyte); + break; + case GL_FLOAT: + bytes = sizeof(GLfloat); + break; + default: + bytes = 0; + } - MakeCurrent(true); - SetupTextureFilters(tex, filt); - glTexImage2D(GetTextureType(), 0, GL_RGBA8, size.width(), size.height(), - 0, GL_RGB , GL_UNSIGNED_BYTE, scratch); - glGetTexLevelParameteriv(GetTextureType(), 0, GL_TEXTURE_WIDTH, &check); - MakeCurrent(false); + if (!bpp || !bytes || size.width() < 1 || size.height() < 1) + return 0; + + return size.width() * size.height() * bpp * bytes; +} - if (scratch) +// locking ok +bool OpenGLContext::ClearTexture(uint tex) +{ + MythGLTexture *tmp = &m_priv->m_textures[tex]; + QSize size = tmp->m_size; + + uint tmp_size = GetBufferSize(size, tmp->m_data_fmt, tmp->m_data_type); + + if (!tmp_size) + return false; + + unsigned char *scratch = new unsigned char[tmp_size]; + + if (!scratch) + return false; + + bzero(scratch, tmp_size); + + GLint check; + if (tmp->m_type == GL_TEXTURE_1D) { - delete scratch; - scratch = NULL; + glTexImage1D(tmp->m_type, 0, tmp->m_internal_fmt, + size.width(), 0, + tmp->m_data_fmt , tmp->m_data_type, scratch); } + else + { + glTexImage2D(tmp->m_type, 0, tmp->m_internal_fmt, + size.width(), size.height(), 0, + tmp->m_data_fmt , tmp->m_data_type, scratch); + } + 
glGetTexLevelParameteriv(tmp->m_type, 0, GL_TEXTURE_WIDTH, &check); + + delete [] scratch; return (check == size.width()); } // locking ok -void OpenGLContext::SetupTextureFilters(uint tex, int filt) +void OpenGLContext::SetTextureFilters(uint tex, uint filt, uint wrap) { - glBindTexture(GetTextureType(), tex); - glTexParameteri(GetTextureType(), GL_TEXTURE_MIN_FILTER, filt); - glTexParameteri(GetTextureType(), GL_TEXTURE_MAG_FILTER, filt); - glTexParameteri(GetTextureType(), GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); - glTexParameteri(GetTextureType(), GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + if (!m_priv->m_textures.count(tex)) + return; + + MakeCurrent(true); + + EnableTextures(tex); + + m_priv->m_textures[tex].m_filter = filt; + m_priv->m_textures[tex].m_wrap = wrap; + + uint type = m_priv->m_textures[tex].m_type; + + glBindTexture(type, tex); + glTexParameteri(type, GL_TEXTURE_MIN_FILTER, filt); + glTexParameteri(type, GL_TEXTURE_MAG_FILTER, filt); + glTexParameteri(type, GL_TEXTURE_WRAP_S, wrap); + if (type != GL_TEXTURE_1D) + glTexParameteri(type, GL_TEXTURE_WRAP_T, wrap); + + MakeCurrent(false); } // locking ok void OpenGLContext::DeleteTexture(uint tex) { + if (!m_priv->m_textures.count(tex)) + return; + MakeCurrent(true); - vector::iterator it; - for (it = m_priv->m_textures.begin(); it !=m_priv->m_textures.end(); it++) + GLuint gltex = tex; + glDeleteTextures(1, &gltex); + + if (m_priv->m_textures[tex].m_data) { - if (*(it) == tex) - { - GLuint gltex = tex; - glDeleteTextures(1, &gltex); - m_priv->m_textures.erase(it); - break; - } + delete m_priv->m_textures[tex].m_data; + } + + if (m_priv->m_textures[tex].m_pbo) + { + gMythGLDeleteBuffersARB(1, &(m_priv->m_textures[tex].m_pbo)); } + m_priv->m_textures.erase(tex); + + Flush(true); + MakeCurrent(false); } // locking ok void OpenGLContext::DeleteTextures(void) { - MakeCurrent(true); - - vector::iterator it; + map::iterator it; for (it = m_priv->m_textures.begin(); it !=m_priv->m_textures.end(); it++) - 
glDeleteTextures(1, &(*(it))); + { + GLuint gltex = it->first; + glDeleteTextures(1, &gltex); + + if (it->second.m_data) + { + delete it->second.m_data; + } + + if (it->second.m_pbo) + { + gltex = it->second.m_pbo; + gMythGLDeleteBuffersARB(1, &gltex); + } + } m_priv->m_textures.clear(); - MakeCurrent(false); + Flush(true); } -int OpenGLContext::GetTextureType(void) const +void OpenGLContext::GetTextureType(uint ¤t, bool &rect) { - return m_priv->m_texture_type; + uint type = get_gl_texture_rect_type(m_extensions); + if (type) + { + rect = true; + current = type; + return; + } + + rect = false; + return; } // locking ok bool OpenGLContext::CreateFragmentProgram(const QString &program, uint &fp) { bool success = true; + + if (!(m_ext_used & kGLExtFragProg)) + return false; + GLint error; MakeCurrent(true); @@ -449,6 +791,8 @@ gMythGLDeleteProgramsARB(1, &glfp); } + Flush(true); + MakeCurrent(false); fp = glfp; @@ -473,53 +817,66 @@ } } + Flush(true); + MakeCurrent(false); } void OpenGLContext::BindFragmentProgram(uint fp) { + MakeCurrent(true); gMythGLBindProgramARB(GL_FRAGMENT_PROGRAM_ARB, fp); + MakeCurrent(false); } void OpenGLContext::InitFragmentParams( uint fp, float a, float b, float c, float d) { + MakeCurrent(true); gMythGLProgramEnvParameter4fARB( GL_FRAGMENT_PROGRAM_ARB, fp, a, b, c, d); + MakeCurrent(false); } void OpenGLContext::DeletePrograms(void) { - MakeCurrent(true); - vector::iterator it; for (it = m_priv->m_programs.begin(); it != m_priv->m_programs.end(); it++) gMythGLDeleteProgramsARB(1, &(*(it))); m_priv->m_programs.clear(); - MakeCurrent(false); + Flush(true); } // locking ok -bool OpenGLContext::CreateFrameBuffer(uint &fb, uint tex, const QSize &size) +bool OpenGLContext::CreateFrameBuffer(uint &fb, uint tex) { + if (!(m_ext_used & kGLExtFBufObj)) + return false; + + if (!m_priv->m_textures.count(tex)) + return false; + + MythGLTexture *tmp = &m_priv->m_textures[tex]; + QSize size = tmp->m_size; GLuint glfb; MakeCurrent(true); + glCheck(); - 
SetupTextureFilters(tex, GL_LINEAR); + EnableTextures(tex); glPushAttrib(GL_VIEWPORT_BIT); glViewport(0, 0, size.width(), size.height()); gMythGLGenFramebuffersEXT(1, &glfb); gMythGLBindFramebufferEXT(GL_FRAMEBUFFER_EXT, glfb); - glBindTexture(GetTextureType(), tex); - glTexImage2D(GetTextureType(), 0, GL_RGBA8, + glBindTexture(tmp->m_type, tex); + glTexImage2D(tmp->m_type, 0, tmp->m_internal_fmt, (GLint) size.width(), (GLint) size.height(), 0, - GL_RGB, GL_UNSIGNED_BYTE, NULL); + tmp->m_data_fmt, tmp->m_data_type, NULL); gMythGLFramebufferTexture2DEXT( GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, - GetTextureType(), tex, 0); + tmp->m_type, tex, 0); GLenum status; status = gMythGLCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT); @@ -575,6 +932,9 @@ else gMythGLDeleteFramebuffersEXT(1, &glfb); + Flush(true); + + glCheck(); MakeCurrent(false); fb = glfb; @@ -600,13 +960,13 @@ } } + Flush(true); + MakeCurrent(false); } void OpenGLContext::DeleteFrameBuffers(void) { - MakeCurrent(true); - vector::iterator it; for (it = m_priv->m_framebuffers.begin(); it != m_priv->m_framebuffers.end(); it++) @@ -615,13 +975,15 @@ } m_priv->m_framebuffers.clear(); - MakeCurrent(false); + Flush(true); } // locking ok void OpenGLContext::BindFramebuffer(uint fb) { + MakeCurrent(true); gMythGLBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fb); + MakeCurrent(false); } bool OpenGLContext::IsGLXSupported( @@ -636,3 +998,171 @@ return false; } + +void OpenGLContext::Init2DState(void) +{ + glPixelStorei(GL_UNPACK_ALIGNMENT, 1); + glDisable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); // for gl osd + glDisable(GL_DEPTH_TEST); + glDepthMask(GL_FALSE); + glDisable(GL_CULL_FACE); + glShadeModel(GL_FLAT); + glDisable(GL_POLYGON_SMOOTH); + glDisable(GL_LINE_SMOOTH); + glDisable(GL_POINT_SMOOTH); + glClearColor(0.0f, 0.0f, 0.0f, 0.0f); + glClear(GL_COLOR_BUFFER_BIT); + Flush(true); +} + +void OpenGLContext::SetViewPort(const QSize &size) +{ + if (size.width() == m_viewport.width() && + 
size.height() == m_viewport.height()) + return; + + MakeCurrent(true); + + m_viewport = size; + + glViewport(0, 0, size.width(), size.height()); + glMatrixMode(GL_PROJECTION); + glLoadIdentity(); + glOrtho(0, size.width() - 1, + 0, size.height() - 1, 1, -1); // aargh... + glMatrixMode(GL_MODELVIEW); + glLoadIdentity(); + + MakeCurrent(false); +} + +uint OpenGLContext::CreatePBO(uint tex) +{ + if (!(m_ext_used & kGLExtPBufObj)) + return 0; + + if (!m_priv->m_textures.count(tex)) + return 0; + + MythGLTexture *tmp = &m_priv->m_textures[tex]; + + gMythGLBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0); + glTexImage2D(tmp->m_type, 0, tmp->m_internal_fmt, + tmp->m_size.width(), tmp->m_size.height(), 0, + tmp->m_data_fmt, tmp->m_data_type, NULL); + + GLuint tmp_pbo; + gMythGLGenBuffersARB(1, &tmp_pbo); + + gMythGLBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0); + + Flush(true); + + return tmp_pbo; +} + +uint OpenGLContext::CreateHelperTexture(void) +{ + MakeCurrent(true); + + uint width = m_max_tex_size; + + uint tmp_tex = CreateTexture(QSize(width, 1), QSize(width, 1), + false, + GL_TEXTURE_1D, GL_FLOAT, + GL_RGBA, GL_RGBA16, + GL_NEAREST, GL_REPEAT); + + if (!tmp_tex) + { + DeleteTexture(tmp_tex); + return 0; + } + + float *buf = NULL; + buf = new float[m_priv->m_textures[tmp_tex].m_data_size]; + float *ref = buf; + + for (uint i = 0; i < width; i++) + { + float x = (((float)i) + 0.5f) / (float)width; + store_bicubic_weights(x, ref); + ref += 4; + } + store_bicubic_weights(0, buf); + store_bicubic_weights(1, &buf[(width - 1) << 2]); + + EnableTextures(tmp_tex); + glBindTexture(m_priv->m_textures[tmp_tex].m_type, tmp_tex); + glTexImage1D(GL_TEXTURE_1D, 0, GL_RGBA16, width, 0, GL_RGBA, GL_FLOAT, buf); + + VERBOSE(VB_PLAYBACK, LOC + + QString("Created bicubic helper texture (%1 samples)") + .arg(width)); + + delete [] buf; + + MakeCurrent(false); + + return tmp_tex; +} + +int OpenGLContext::SetPictureAttribute( + PictureAttribute attribute, int newValue) +{ + if 
(!m_colour_control) + return -1; + + MakeCurrent(true); + + int ret = -1; + switch (attribute) + { + case kPictureAttribute_Brightness: + ret = newValue; + pictureAttribs[attribute] = (newValue * 0.02f) - 0.5f; + break; + case kPictureAttribute_Contrast: + case kPictureAttribute_Colour: + ret = newValue; + pictureAttribs[attribute] = (newValue * 0.02f); + break; + case kPictureAttribute_Hue: // not supported yet... + break; + default: + break; + } + + MakeCurrent(false); + + return ret; +} + +PictureAttributeSupported +OpenGLContext::GetSupportedPictureAttributes(void) const +{ + return (!m_colour_control) ? + kPictureAttributeSupported_None : + (PictureAttributeSupported) + (kPictureAttributeSupported_Brightness | + kPictureAttributeSupported_Contrast | + kPictureAttributeSupported_Colour); +} + +void OpenGLContext::SetColourParams(void) +{ + if (!m_colour_control) + return; + + MakeCurrent(true); + + InitFragmentParams(0, + pictureAttribs[kPictureAttribute_Brightness], + pictureAttribs[kPictureAttribute_Contrast], + pictureAttribs[kPictureAttribute_Colour], + 0.5f); + + MakeCurrent(false); +} + diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/openglcontext.h mythtv/libs/libmythtv/openglcontext.h --- mythtv.ori/libs/libmythtv/openglcontext.h 2009-01-23 16:38:27.000000000 +1100 +++ mythtv/libs/libmythtv/openglcontext.h 2009-02-10 14:00:22.000000000 +1100 @@ -11,6 +11,24 @@ // MythTV headers #include "util-x11.h" +#include "frame.h" +#include "videooutbase.h" + +#ifndef GL_BGRA +#define GL_BGRA 0x80E1 +#endif +#ifndef GL_UNSIGNED_BYTE +#define GL_UNSIGNED_BYTE 0x1401 +#endif +#ifndef GL_RGBA8 +#define GL_RGBA8 0x8058 +#endif +#ifndef GL_LINEAR +#define GL_LINEAR 0x2601 +#endif +#ifndef GL_CLAMP_TO_EDGE +#define GL_CLAMP_TO_EDGE 0x812F +#endif class OpenGLVideo; class PrivateContext; @@ -21,59 +39,96 @@ kGLExtFragProg = 0x02, kGLExtFBufObj = 0x04, kGLXPBuffer = 0x08, + kGLExtPBufObj = 0x10, + kGLNVFence = 0x20, + kGLFinish = 0x40, + kGLMaxFeat = 0x80, } 
GLFeatures; +class OpenGLContext; + +class OpenGLContextLocker +{ + public: + OpenGLContextLocker(OpenGLContext *ctx); + ~OpenGLContextLocker(); + + private: + OpenGLContext *m_ctx; +}; + #ifdef USING_OPENGL class OpenGLContext { public: - OpenGLContext(); + OpenGLContext(QMutex *lock); ~OpenGLContext(); bool Create(Display *display, Window window, uint screen_num, - const QSize &display_visible_size, bool visible); + const QRect &display_visible, bool colour_control = false); + void SetViewPort(const QSize &size); void Hide(void); void Show(void); bool MakeCurrent(bool current); void SwapBuffers(void); - void Flush(void); + void Flush(bool use_fence); - uint GetMaxTexSize(void) const { return m_max_tex_size; } uint GetScreenNum(void) const { return m_screen_num; } - uint CreateTexture(void); - bool SetupTexture(const QSize &size, uint tex, int filt); - void SetupTextureFilters(uint tex, int filt); + void UpdateTexture(uint tex, const unsigned char *buf, + const int *offsets, + const int *pitches, + VideoFrameType fmt, + bool interlaced = FALSE, + const unsigned char* alpha = NULL); + uint CreateTexture(QSize tot_size, QSize vid_size, + bool use_pbo, uint type, + uint data_type = GL_UNSIGNED_BYTE, + uint data_fmt = GL_BGRA, + uint internal_fmt = GL_RGBA8, + uint filter = GL_LINEAR, + uint wrap = GL_CLAMP_TO_EDGE); + void SetTextureFilters(uint tex, uint filt, uint wrap); void DeleteTexture(uint tex); - int GetTextureType(void) const; - void EnableTextures(void); + void GetTextureType(uint ¤t, bool &rect); + void EnableTextures(uint type, uint tex_type = 0); + void DisableTextures(void); bool CreateFragmentProgram(const QString &program, uint &prog); void DeleteFragmentProgram(uint prog); void BindFragmentProgram(uint fp); void InitFragmentParams(uint fp, float a, float b, float c, float d); - bool CreateFrameBuffer(uint &fb, uint tex, const QSize &size); + bool CreateFrameBuffer(uint &fb, uint tex); void DeleteFrameBuffer(uint fb); void BindFramebuffer(uint fb); - 
- bool IsFeatureSupported(GLFeatures feature) const - { return m_ext_supported & feature; } + uint GetFeatures(void) { return m_ext_supported; } + void SetFeatures(uint features) { m_ext_used = features; } static bool IsGLXSupported(Display *display, uint major, uint minor); + int SetPictureAttribute(PictureAttribute attributeType, int newValue); + PictureAttributeSupported GetSupportedPictureAttributes(void) const; + void SetColourParams(void); + uint CreateHelperTexture(void); + private: + void Init2DState(void); bool IsGLXSupported(uint major, uint minor) const { return (m_major_ver > major) || ((m_major_ver == major) && (m_minor_ver >= minor)); } + uint CreatePBO(uint tex); + void DeleteTextures(void); void DeletePrograms(void); void DeleteFrameBuffers(void); + uint GetBufferSize(QSize size, uint fmt, uint type); + bool ClearTexture(uint tex); PrivateContext *m_priv; @@ -83,8 +138,14 @@ uint m_minor_ver; QString m_extensions; uint m_ext_supported; - bool m_visible; + uint m_ext_used; uint m_max_tex_size; + QSize m_viewport; + QMutex *m_lock; + int m_lock_level; + bool m_colour_control; + + float pictureAttribs[kPictureAttribute_MAX]; }; #else // if !USING_OPENGL @@ -92,37 +153,51 @@ class OpenGLContext { public: - OpenGLContext() { } + OpenGLContext(QMutex*) { } ~OpenGLContext() { } - bool Create(Display*, Window, uint, const QSize&, bool) { return false; } + bool Create(Display*, Window, uint, const Rect&, bool = false) + { return false; } + void SetViewPort(const QSize&) { } + void Hide(void) { } + void Show(void) { } bool MakeCurrent(bool) { return false; } void SwapBuffers(void) { } - void Flush(void) { } + void Flush(bool) { } - uint GetMaxTexSize(void) const { return 0; } uint GetScreenNum(void) const { return 0; } - uint CreateTexture(void) { return 0; } - bool SetupTexture(const QSize&, uint, int) { return false; } - void SetupTextureFilters(uint, int) { } + void UpdateTexture(uint, const unsigned char*, + const int *, const int *, + VideoFrameType, 
bool = FALSE, + const unsigned char* = NULL) { } + uint CreateTexture(QSize, QSize, bool, uint, + uint = 0, uint = 0, uint = 0, + uint = 0, uint = 0) { return 0; } + void SetTextureFilters(uint, uint, uint) { } void DeleteTexture(uint) { } - int GetTextureType(void) const { return 0; } - void EnableTextures(void) { } + void GetTextureType(uint&, bool&) { } + void EnableTextures(uint, uint = 0) { } bool CreateFragmentProgram(const QString&, uint&) { return false; } void DeleteFragmentProgram(uint) { } void BindFragmentProgram(uint) { } void InitFragmentParams(uint, float, float, float, float) { } - bool CreateFrameBuffer(uint&, uint, const QSize&) { return false; } + bool CreateFrameBuffer(uint&, uint) { return false; } void DeleteFrameBuffer(uint); void BindFramebuffer(uint); - bool IsFeatureSupported(GLFeatures) const { return false; } - + uint GetFeatures(void) { return 0; } + void SetFeatures(uint) { } static bool IsGLXSupported(Display*, uint, uint) { return false; } + + int SetPictureAttribute(PictureAttribute, int) { return -1; } + PictureAttributeSupported GetSupportedPictureAttributes(void) const + { return kPictureAttributeSupported_None; } + void SetColourParams(void); + uint CreateHelperTexture(void); }; #endif //!USING_OPENGL diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/openglvideo.cpp mythtv/libs/libmythtv/openglvideo.cpp --- mythtv.ori/libs/libmythtv/openglvideo.cpp 2009-01-23 16:38:27.000000000 +1100 +++ mythtv/libs/libmythtv/openglvideo.cpp 2009-02-10 14:00:22.000000000 +1100 @@ -26,35 +26,34 @@ class OpenGLFilter { public: - GLuint fragmentProgram; + vector fragmentPrograms; uint numInputs; - bool rotateFrameBuffers; vector frameBuffers; vector frameBufferTextures; DisplayBuffer outputBuffer; }; OpenGLVideo::OpenGLVideo() : - gl_context(NULL), videoSize(0,0), - viewportSize(0,0), masterViewportSize(0,0), - visibleRect(0,0,0,0), videoRect(0,0,0,0), - frameRect(0,0,0,0), - frameBufferRect(0,0,0,0), invertVideo(false), - 
softwareDeinterlacer(QString::null), - hardwareDeinterlacing(false), + gl_context(NULL), video_dim(0,0), + actual_video_dim(0,0), viewportSize(0,0), + masterViewportSize(0,0), display_visible_rect(0,0,0,0), + display_video_rect(0,0,0,0), video_rect(0,0,0,0), + frameBufferRect(0,0,0,0), softwareDeinterlacer(QString::null), + hardwareDeinterlacer(QString::null), hardwareDeinterlacing(false), useColourControl(false), viewportControl(false), - frameBuffer(0), frameBufferTexture(0), inputTextureSize(0,0), currentFrameNum(0), - inputUpdated(false), - + inputUpdated(false), + textureRects(false), textureType(GL_TEXTURE_2D), + helperTexture(0), defaultUpsize(kGLFilterResize), convertSize(0,0), convertBuf(NULL), - - videoResize(false), videoResizeRect(0,0,0,0) + videoResize(false), videoResizeRect(0,0,0,0), + gl_features(0) { } OpenGLVideo::~OpenGLVideo() { + OpenGLContextLocker ctx_lock(gl_context); Teardown(); } @@ -63,55 +62,45 @@ { ShutDownYUV2RGB(); - gl_context->MakeCurrent(true); - - if (frameBuffer) - gl_context->DeleteFrameBuffer(frameBuffer); + if (helperTexture) + gl_context->DeleteTexture(helperTexture); + helperTexture = 0; - if (frameBufferTexture) - gl_context->DeleteTexture(frameBufferTexture); - - for (uint i = 0; i < inputTextures.size(); i++) - gl_context->DeleteTexture(inputTextures[i]); - inputTextures.clear(); + DeleteTextures(&inputTextures); + DeleteTextures(&referenceTextures); if (!filters.empty()) { glfilt_map_t::iterator it; - for (it = filters.begin(); it != filters.end(); ++it) + for (it = filters.begin(); it != filters.end(); it++) { - if (it->second->fragmentProgram) - gl_context->DeleteFragmentProgram(it->second->fragmentProgram); - vector temp = it->second->frameBuffers; - for (uint i = 0; i < temp.size(); i++) - gl_context->DeleteFrameBuffer(temp[i]); - temp = it->second->frameBufferTextures; - for (uint i = 0; i < temp.size(); i++) - gl_context->DeleteTexture((temp[i])); + RemoveFilter(it->first); } } filters.clear(); - - 
gl_context->MakeCurrent(false); } // locking ok bool OpenGLVideo::Init(OpenGLContext *glcontext, bool colour_control, - bool onscreen, QSize video_size, QRect visible_rect, - QRect video_rect, QRect frame_rect, - bool viewport_control, bool osd) + QSize videoDim, QRect displayVisibleRect, + QRect displayVideoRect, QRect videoRect, + bool viewport_control, QString options, bool osd) { gl_context = glcontext; - videoSize = video_size; - visibleRect = visible_rect; - videoRect = video_rect; - frameRect = frame_rect; - masterViewportSize = QSize(1920, 1080); + if (!gl_context) + return false; - QSize rect = GetTextureSize(videoSize); + OpenGLContextLocker ctx_lock(gl_context); - frameBufferRect = QRect(QPoint(0,0), rect); - invertVideo = true; + actual_video_dim = videoDim; + video_dim = videoDim; + if (video_dim.height() == 1088) + video_dim.setHeight(1080); + display_visible_rect = displayVisibleRect; + display_video_rect = displayVideoRect; + video_rect = videoRect; + masterViewportSize = QSize(1920, 1080); + frameBufferRect = QRect(QPoint(0,0), video_dim); softwareDeinterlacer = ""; hardwareDeinterlacing = false; useColourControl = colour_control; @@ -120,54 +109,53 @@ convertSize = QSize(0,0); videoResize = false; videoResizeRect = QRect(0,0,0,0); - frameBuffer = 0; currentFrameNum = -1; inputUpdated = false; - if (!onscreen) - { - QSize fb_size = GetTextureSize(visibleRect.size()); - if (!AddFrameBuffer(frameBuffer, frameBufferTexture, fb_size)) - return false; - } + gl_features = ParseOptions(options) & + gl_context->GetFeatures(); + + if (viewportControl) + gl_context->SetFeatures(gl_features); + + if (options.contains("openglbicubic")) + defaultUpsize = kGLFilterBicubic; + + if ((defaultUpsize != kGLFilterBicubic) && (gl_features & kGLExtRect)) + gl_context->GetTextureType(textureType, textureRects); + + SetViewPort(display_visible_rect.size()); - SetViewPort(visibleRect.size()); - InitOpenGL(); + bool use_pbo = gl_features & kGLExtPBufObj; if (osd) { - QSize 
osdsize = visibleRect.size(); - QSize half_size(osdsize.width() >> 1, osdsize.height() >>1); - GLuint alphatex = CreateVideoTexture(osdsize, inputTextureSize); - GLuint utex = CreateVideoTexture(half_size, inputTextureSize); - GLuint vtex = CreateVideoTexture(half_size, inputTextureSize); - GLuint ytex = CreateVideoTexture(osdsize, inputTextureSize); - - if ((alphatex && ytex && utex && vtex) && AddFilter(kGLFilterYUV2RGBA)) - { - inputTextures.push_back(ytex); - inputTextures.push_back(utex); - inputTextures.push_back(vtex); - inputTextures.push_back(alphatex); - if (!AddFilter(kGLFilterResize)) - { - Teardown(); - return false; - } + QSize osdsize = display_visible_rect.size(); + GLuint tex = CreateVideoTexture(osdsize, inputTextureSize, use_pbo); + + if (tex && + AddFilter(kGLFilterYUV2RGBA) && + AddFilter(kGLFilterResize)) + { + inputTextures.push_back(tex); + } + else + { + Teardown(); } } else { - QSize half_size(videoSize.width() >> 1, videoSize.height() >>1); - GLuint utex = CreateVideoTexture(half_size, inputTextureSize); - GLuint vtex = CreateVideoTexture(half_size, inputTextureSize); - GLuint ytex = CreateVideoTexture(videoSize, inputTextureSize);; - - if ((ytex && utex && vtex) && AddFilter(kGLFilterYUV2RGB)) - { - inputTextures.push_back(ytex); - inputTextures.push_back(utex); - inputTextures.push_back(vtex); + GLuint tex = CreateVideoTexture(actual_video_dim, + inputTextureSize, use_pbo); + + if (tex && AddFilter(kGLFilterYUV2RGB)) + { + inputTextures.push_back(tex); + } + else + { + Teardown(); } } @@ -184,11 +172,12 @@ "Falling back to software conversion.\n\t\t\t" "Any opengl filters will also be disabled."); - GLuint rgb24tex = CreateVideoTexture(videoSize, inputTextureSize); + GLuint bgra32tex = CreateVideoTexture(actual_video_dim, + inputTextureSize, use_pbo); - if (rgb24tex && AddFilter(kGLFilterResize)) + if (bgra32tex && AddFilter(kGLFilterResize)) { - inputTextures.push_back(rgb24tex); + inputTextures.push_back(bgra32tex); } else { @@ 
-198,68 +187,81 @@ } } +#ifdef MMX + bool mmx = true; +#else + bool mmx = false; +#endif + + CheckResize(false); + + VERBOSE(VB_PLAYBACK, LOC + + QString("Using packed textures with%1 mmx and with%2 PBOs") + .arg(mmx ? "" : "out").arg(use_pbo ? "" : "out")); + return true; } -OpenGLFilterType OpenGLVideo::GetDeintFilter(void) const +void OpenGLVideo::CheckResize(bool deinterlacing) { - if (filters.count(kGLFilterKernelDeint)) - return kGLFilterKernelDeint; - if (filters.count(kGLFilterLinearBlendDeint)) - return kGLFilterLinearBlendDeint; - if (filters.count(kGLFilterOneFieldDeint)) - return kGLFilterOneFieldDeint; - if (filters.count(kGLFilterBobDeintDFR)) - return kGLFilterBobDeintDFR; - if (filters.count(kGLFilterOneFieldDeintDFR)) - return kGLFilterOneFieldDeintDFR; - if (filters.count(kGLFilterLinearBlendDeintDFR)) - return kGLFilterLinearBlendDeintDFR; - if (filters.count(kGLFilterKernelDeintDFR)) - return kGLFilterKernelDeintDFR; - if (filters.count(kGLFilterFieldOrderDFR)) - return kGLFilterFieldOrderDFR; + // to improve performance on slower cards + bool resize_up = (video_dim.height() < display_video_rect.height()) || + (video_dim.width() < display_video_rect.width()); - return kGLFilterNone; -} + // to ensure deinterlacing works correctly + bool resize_down = (video_dim.height() > display_video_rect.height()) && + deinterlacing; -bool OpenGLVideo::OptimiseFilters(void) -{ - // if video height does not match display rect height, add resize stage - // to preserve field information N.B. 
assumes interlaced - // if video rectangle is smaller than display rectangle, add resize stage - // to improve performance - - bool needResize = ((videoSize.height() != videoRect.height()) || - (videoSize.width() < videoRect.width())); - if (needResize && !filters.count(kGLFilterResize) && - !(AddFilter(kGLFilterResize))) + if (resize_up && (defaultUpsize == kGLFilterBicubic)) { - return false; + RemoveFilter(kGLFilterResize); + filters.erase(kGLFilterResize); + AddFilter(kGLFilterBicubic); + return; + } + + if ((resize_up && (defaultUpsize == kGLFilterResize)) || resize_down) + { + RemoveFilter(kGLFilterBicubic); + filters.erase(kGLFilterBicubic); + AddFilter(kGLFilterResize); + return; } + if (!filters.count(kGLFilterYUV2RGBA)) + { + RemoveFilter(kGLFilterResize); + filters.erase(kGLFilterResize); + } + + RemoveFilter(kGLFilterBicubic); + filters.erase(kGLFilterBicubic); + + OptimiseFilters(); +} + +bool OpenGLVideo::OptimiseFilters(void) +{ glfilt_map_t::reverse_iterator it; // add/remove required frame buffer objects // and link filters uint buffers_needed = 1; bool last_filter = true; - bool needtorotate = false; for (it = filters.rbegin(); it != filters.rend(); it++) { - it->second->outputBuffer = kFrameBufferObject; - it->second->rotateFrameBuffers = needtorotate; if (!last_filter) { + it->second->outputBuffer = kFrameBufferObject; uint buffers_have = it->second->frameBuffers.size(); int buffers_diff = buffers_needed - buffers_have; if (buffers_diff > 0) { uint tmp_buf, tmp_tex; - QSize fb_size = GetTextureSize(videoSize); + QSize fb_size = GetTextureSize(video_dim); for (int i = 0; i < buffers_diff; i++) { - if (!AddFrameBuffer(tmp_buf, tmp_tex, fb_size)) + if (!AddFrameBuffer(tmp_buf, fb_size, tmp_tex, video_dim)) return false; else { @@ -286,25 +288,13 @@ } else { + it->second->outputBuffer = kDefaultBuffer; last_filter = false; } - buffers_needed = it->second->numInputs; - needtorotate = (it->first == kGLFilterKernelDeint || - it->first == 
kGLFilterLinearBlendDeint || - it->first == kGLFilterOneFieldDeintDFR || - it->first == kGLFilterLinearBlendDeintDFR || - it->first == kGLFilterKernelDeintDFR || - it->first == kGLFilterFieldOrderDFR); - } - bool deinterlacing = hardwareDeinterlacing; - hardwareDeinterlacing = true; - - SetDeinterlacing(false); - if (deinterlacing) - SetDeinterlacing(deinterlacing); + SetFiltering(); return true; } @@ -314,75 +304,30 @@ { // filter settings included for performance only // no (obvious) quality improvement over GL_LINEAR throughout - if (filters.empty()) - return; - - if (filters.size() == 1) + if (filters.empty() || filters.size() == 1) { - SetTextureFilters(&inputTextures, GL_LINEAR); + SetTextureFilters(&inputTextures, GL_LINEAR, GL_CLAMP_TO_EDGE); return; } - SetTextureFilters(&inputTextures, GL_NEAREST); - vector textures; - glfilt_map_t::iterator it; - for (it = filters.begin(); it != filters.end(); it++) - SetTextureFilters(&(it->second->frameBufferTextures), GL_NEAREST); + SetTextureFilters(&inputTextures, GL_NEAREST, GL_CLAMP_TO_EDGE); - // resize or last active (ie don't need resize) need GL_LINEAR glfilt_map_t::reverse_iterator rit; - bool next = false; - bool resize = filters.count(kGLFilterResize); + int last_filter = 0; + for (rit = filters.rbegin(); rit != filters.rend(); rit++) { - if (next && (rit->second->outputBuffer != kNoBuffer)) + if (last_filter == 1) { - SetTextureFilters(&(rit->second->frameBufferTextures), GL_LINEAR); - return; + SetTextureFilters(&(rit->second->frameBufferTextures), + GL_LINEAR, GL_CLAMP_TO_EDGE); } - - if (resize) + else if (last_filter > 1) { - next |= ((rit->first == kGLFilterResize) || - (rit->second->outputBuffer == kDefaultBuffer)); + SetTextureFilters(&(rit->second->frameBufferTextures), + GL_NEAREST, GL_CLAMP_TO_EDGE); } } - - SetTextureFilters(&inputTextures, GL_LINEAR); -} - -// locking ok -bool OpenGLVideo::ReInit(OpenGLContext *glcontext, bool colour_control, - bool onscreen, QSize video_size, QRect 
visible_rect, - QRect video_rect, QRect frame_rect, - bool viewport_control, bool osd) -{ - VERBOSE(VB_PLAYBACK, LOC + "Reinit"); - - gl_context->MakeCurrent(true); - - QString harddeint = GetDeinterlacer(); // only adds back deinterlacer - QString softdeint = softwareDeinterlacer; - bool interlacing = hardwareDeinterlacing; - bool resize = videoResize; - QRect resize_rect = videoResizeRect; - - Teardown(); - - bool success = Init(glcontext, colour_control, onscreen, video_size, - visible_rect, video_rect, frame_rect, - viewport_control, osd); - - if (harddeint != "") - success &= AddDeinterlacer(harddeint); - - softwareDeinterlacer = softdeint; - SetDeinterlacing(interlacing); - - if (resize) - SetVideoResize(resize_rect); - - return success; } // locking ok @@ -391,54 +336,49 @@ if (filters.count(filter)) return true; + bool success = true; + VERBOSE(VB_PLAYBACK, LOC + QString("Creating %1 filter.") .arg(FilterToString(filter))); - gl_context->MakeCurrent(true); - OpenGLFilter *temp = new OpenGLFilter(); temp->numInputs = 1; + GLuint program = 0; - if ((filter == kGLFilterLinearBlendDeint) || - (filter == kGLFilterKernelDeint) || - (filter == kGLFilterFieldOrderDFR)) - { - temp->numInputs = 2; - } - else if ((filter == kGLFilterYUV2RGB) || - (filter == kGLFilterOneFieldDeintDFR) || - (filter == kGLFilterKernelDeintDFR) || - (filter == kGLFilterLinearBlendDeintDFR)) + if (filter == kGLFilterBicubic) { - temp->numInputs = 3; - } - else if ((filter == kGLFilterYUV2RGBA)) - { - temp->numInputs = 4; + if (helperTexture) + gl_context->DeleteTexture(helperTexture); + + helperTexture = gl_context->CreateHelperTexture(); + if (!helperTexture) + success = false; } - GLuint program = 0; if (filter != kGLFilterNone && filter != kGLFilterResize) { program = AddFragmentProgram(filter); if (!program) - return false; + success = false; + else + temp->fragmentPrograms.push_back(program); } - temp->fragmentProgram = program; temp->outputBuffer = kDefaultBuffer; - 
temp->rotateFrameBuffers = false; temp->frameBuffers.clear(); temp->frameBufferTextures.clear(); filters[filter] = temp; - if (OptimiseFilters()) + success &= OptimiseFilters(); + + if (success) return true; RemoveFilter(filter); + filters.erase(filter); return false; } @@ -449,83 +389,153 @@ if (!filters.count(filter)) return true; - VERBOSE(VB_PLAYBACK, QString("Removing %1 filter") + VERBOSE(VB_PLAYBACK, LOC + QString("Removing %1 filter") .arg(FilterToString(filter))); - gl_context->MakeCurrent(true); - - gl_context->DeleteFragmentProgram(filters[filter]->fragmentProgram); - vector temp; vector::iterator it; - temp = filters[filter]->frameBuffers; + temp = filters[filter]->fragmentPrograms; for (it = temp.begin(); it != temp.end(); it++) - gl_context->DeleteFrameBuffer(*it); + gl_context->DeleteFragmentProgram(*it); + filters[filter]->fragmentPrograms.clear(); - temp = filters[filter]->frameBufferTextures; + temp = filters[filter]->frameBuffers; for (it = temp.begin(); it != temp.end(); it++) - gl_context->DeleteTexture((*(it))); + gl_context->DeleteFrameBuffer(*it); + filters[filter]->frameBuffers.clear(); - filters.erase(filter); + DeleteTextures(&(filters[filter]->frameBufferTextures)); - gl_context->MakeCurrent(false); + delete filters[filter]; return true; } // locking ok -bool OpenGLVideo::AddDeinterlacer(const QString &filter) +void OpenGLVideo::TearDownDeinterlacer(void) { - QString current_deinterlacer = GetDeinterlacer(); + if (!filters.count(kGLFilterYUV2RGB)) + return; - if (current_deinterlacer == filter) + OpenGLFilter *tmp = filters[kGLFilterYUV2RGB]; + + if (tmp->fragmentPrograms.size() == 3) + { + gl_context->DeleteFragmentProgram(tmp->fragmentPrograms[2]); + tmp->fragmentPrograms.pop_back(); + } + + if (tmp->fragmentPrograms.size() == 2) + { + gl_context->DeleteFragmentProgram(tmp->fragmentPrograms[1]); + tmp->fragmentPrograms.pop_back(); + } + + DeleteTextures(&referenceTextures); +} + +bool OpenGLVideo::AddDeinterlacer(const QString 
&deinterlacer) +{ + OpenGLContextLocker ctx_lock(gl_context); + + if (!filters.count(kGLFilterYUV2RGB)) + return false; + + if (hardwareDeinterlacer == deinterlacer) return true; - if (!current_deinterlacer.isEmpty()) - RemoveFilter(current_deinterlacer); + TearDownDeinterlacer(); + + bool success = true; - return AddFilter(filter); + uint ref_size = 2; + + if (deinterlacer == "openglbobdeint" || + deinterlacer == "openglonefield" || + deinterlacer == "opengldoubleratefieldorder") + { + ref_size = 0; + } + + if (ref_size > 0) + { + bool use_pbo = gl_features & kGLExtPBufObj; + + for (; ref_size > 0; ref_size--) + { + GLuint tex = CreateVideoTexture(actual_video_dim, inputTextureSize, use_pbo); + if (tex) + { + referenceTextures.push_back(tex); + } + else + { + success = false; + } + } + } + + uint prog1 = AddFragmentProgram(kGLFilterYUV2RGB, + deinterlacer, kScan_Interlaced); + uint prog2 = AddFragmentProgram(kGLFilterYUV2RGB, + deinterlacer, kScan_Intr2ndField); + + if (prog1 && prog2) + { + filters[kGLFilterYUV2RGB]->fragmentPrograms.push_back(prog1); + filters[kGLFilterYUV2RGB]->fragmentPrograms.push_back(prog2); + } + else + { + success = false; + } + + if (success) + { + CheckResize(hardwareDeinterlacing); + hardwareDeinterlacer = deinterlacer; + return true; + } + + hardwareDeinterlacer = ""; + TearDownDeinterlacer(); + + return false; } // locking ok -uint OpenGLVideo::AddFragmentProgram(OpenGLFilterType name) +uint OpenGLVideo::AddFragmentProgram(OpenGLFilterType name, + QString deint, FrameScanType field) { - if (!gl_context->IsFeatureSupported(kGLExtFragProg)) + if (!(gl_features & kGLExtFragProg)) { VERBOSE(VB_PLAYBACK, LOC_ERR + "Fragment programs not supported"); return 0; } - QString program = GetProgramString(name); - QString texType = (gl_context->IsFeatureSupported(kGLExtRect)) ? 
"RECT" : "2D"; - program.replace("%1", texType); + QString program = GetProgramString(name, deint, field); uint ret; if (gl_context->CreateFragmentProgram(program, ret)) - { - VERBOSE(VB_PLAYBACK, LOC + QString("Created fragment program %1.") - .arg(FilterToString(name))); - return ret; - } return 0; } // locking ok -bool OpenGLVideo::AddFrameBuffer(uint &framebuffer, - uint &texture, QSize size) +bool OpenGLVideo::AddFrameBuffer(uint &framebuffer, QSize fb_size, + uint &texture, QSize vid_size) { - if (!gl_context->IsFeatureSupported(kGLExtFBufObj)) + if (!(gl_features & kGLExtFBufObj)) { VERBOSE(VB_PLAYBACK, LOC_ERR + "Framebuffer binding not supported."); return false; } - texture = gl_context->CreateTexture(); + texture = gl_context->CreateTexture(fb_size, vid_size, false, textureType); - bool ok = gl_context->CreateFrameBuffer(framebuffer, texture, size); + bool ok = gl_context->CreateFrameBuffer(framebuffer, texture); if (!ok) gl_context->DeleteTexture(texture); @@ -536,8 +546,8 @@ // locking ok void OpenGLVideo::SetViewPort(const QSize &viewPortSize) { - uint w = max(viewPortSize.width(), videoSize.width()); - uint h = max(viewPortSize.height(), videoSize.height()); + uint w = max(viewPortSize.width(), video_dim.width()); + uint h = max(viewPortSize.height(), video_dim.height()); viewportSize = QSize(w, h); @@ -546,60 +556,26 @@ VERBOSE(VB_PLAYBACK, LOC + QString("Viewport: %1x%2") .arg(w).arg(h)); - - SetViewPortPrivate(viewportSize); -} - -void OpenGLVideo::SetViewPortPrivate(const QSize &viewPortSize) -{ - glViewport(0, 0, viewPortSize.width(), viewPortSize.height()); - glMatrixMode(GL_PROJECTION); - glLoadIdentity(); - glOrtho(0, viewPortSize.width() - 1, - 0, viewPortSize.height() - 1, 1, -1); // aargh... 
- glMatrixMode(GL_MODELVIEW); - glLoadIdentity(); -} - -// locking ok -void OpenGLVideo::InitOpenGL(void) -{ - gl_context->MakeCurrent(true); - glDisable(GL_BLEND); - glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); // for gl osd - glDisable(GL_DEPTH_TEST); - glDepthMask(GL_FALSE); - glDisable(GL_CULL_FACE); - gl_context->EnableTextures();; - glShadeModel(GL_FLAT); - glDisable(GL_POLYGON_SMOOTH); - glDisable(GL_LINE_SMOOTH); - glDisable(GL_POINT_SMOOTH); - glClearColor(0.0f, 0.0f, 0.0f, 0.0f); - glClear(GL_COLOR_BUFFER_BIT); - glFlush(); - gl_context->MakeCurrent(false); + gl_context->SetViewPort(viewportSize); } // locking ok -uint OpenGLVideo::CreateVideoTexture(QSize size, QSize &tex_size) +uint OpenGLVideo::CreateVideoTexture(QSize size, QSize &tex_size, + bool use_pbo) { - uint tmp_tex = gl_context->CreateTexture(); - QSize temp = GetTextureSize(size); + uint tmp_tex = gl_context->CreateTexture(temp, size, use_pbo, + textureType); - if ((temp.width() > (int)gl_context->GetMaxTexSize()) || - (temp.height() > (int)gl_context->GetMaxTexSize()) || - !gl_context->SetupTexture(temp, tmp_tex, GL_LINEAR)) + if (!tmp_tex) { VERBOSE(VB_PLAYBACK, LOC_ERR + "Could not create texture."); - gl_context->DeleteTexture(tmp_tex); return 0; } tex_size = temp; - VERBOSE(VB_PLAYBACK, LOC + QString("Created main input texture %1x%2") + VERBOSE(VB_PLAYBACK, LOC + QString("Created texture (%1x%2)") .arg(temp.width()).arg(temp.height())); return tmp_tex; @@ -608,7 +584,7 @@ // locking ok QSize OpenGLVideo::GetTextureSize(const QSize &size) { - if (gl_context->IsFeatureSupported(kGLExtRect)) + if (textureRects) return size; int w = 64; @@ -628,10 +604,12 @@ } // locking ok -void OpenGLVideo::UpdateInputFrame(const VideoFrame *frame) +void OpenGLVideo::UpdateInputFrame(const VideoFrame *frame, bool soft_bob) { - if (frame->width != videoSize.width() || - frame->height != videoSize.height() || + OpenGLContextLocker ctx_lock(gl_context); + + if (frame->width != actual_video_dim.width() 
|| + frame->height != actual_video_dim.height() || frame->width < 1 || frame->height < 1) { @@ -641,64 +619,65 @@ if (filters.count(kGLFilterYUV2RGB) && (frame->codec == FMT_YV12)) { - UpdateInput(frame->buf, frame->offsets, 0, FMT_YV12, videoSize); + if (hardwareDeinterlacing) + RotateTextures(); + + gl_context->UpdateTexture(inputTextures[0], frame->buf, + frame->offsets, frame->pitches, FMT_YV12, + frame->interlaced_frame && !soft_bob); + inputUpdated = true; return; } // software yuv2rgb - if (convertSize != videoSize) + if (convertSize != actual_video_dim) { ShutDownYUV2RGB(); VERBOSE(VB_PLAYBACK, LOC + "Init software conversion."); - convertSize = videoSize; + convertSize = actual_video_dim; convertBuf = new unsigned char[ - (videoSize.width() * videoSize.height() * 3) + 128]; + (actual_video_dim.width() * actual_video_dim.height() * 4) + 128]; } if (convertBuf) { AVPicture img_in, img_out; - avpicture_fill(&img_out, (uint8_t *)convertBuf, PIX_FMT_RGB24, + avpicture_fill(&img_out, (uint8_t *)convertBuf, PIX_FMT_BGRA, convertSize.width(), convertSize.height()); avpicture_fill(&img_in, (uint8_t *)frame->buf, PIX_FMT_YUV420P, convertSize.width(), convertSize.height()); - img_convert(&img_out, PIX_FMT_RGB24, + img_convert(&img_out, PIX_FMT_BGRA, &img_in, PIX_FMT_YUV420P, convertSize.width(), convertSize.height()); int offset = 0; - UpdateInput(convertBuf, &offset, 0, FMT_RGB24, convertSize); + gl_context->UpdateTexture(inputTextures[0], convertBuf, + &offset, &offset, FMT_BGRA); } + + inputUpdated = true; } // locking ok void OpenGLVideo::UpdateInput(const unsigned char *buf, const int *offsets, - uint texture_index, int format, QSize size) + int format, QSize size, + const unsigned char *alpha) { - inputUpdated = false; + OpenGLContextLocker ctx_lock(gl_context); - if (texture_index >= inputTextures.size()) + if (size.width() != actual_video_dim.width() || + size.height() != actual_video_dim.height() || + format != FMT_YV12 || !alpha) return; - 
copy_pixels_to_texture( - buf + offsets[0], format, size, - inputTextures[texture_index], gl_context->GetTextureType()); - - if (FMT_YV12 == format) - { - QSize chroma_size(size.width() >> 1, size.height() >> 1); - copy_pixels_to_texture( - buf + offsets[1], format, chroma_size, - inputTextures[texture_index + 1], - gl_context->GetTextureType()); - copy_pixels_to_texture( - buf + offsets[2], format, chroma_size, - inputTextures[texture_index + 2], - gl_context->GetTextureType()); - } + int pitches[3] = {size.width(), size.width() >> 1, size.width() >> 1}; + + gl_context->UpdateTexture(inputTextures[0], buf, + offsets, pitches, FMT_YV12, + false, alpha); inputUpdated = true; } @@ -718,14 +697,16 @@ // TODO shouldn't this take a QSize, not QRect? void OpenGLVideo::SetVideoResize(const QRect &rect) { - bool abort = ((rect.right() > videoSize.width()) || - (rect.bottom() > videoSize.height()) || - (rect.width() > videoSize.width()) || - (rect.height() > videoSize.height())); + OpenGLContextLocker ctx_lock(gl_context); + + bool abort = ((rect.right() > video_dim.width()) || + (rect.bottom() > video_dim.height()) || + (rect.width() > video_dim.width()) || + (rect.height() > video_dim.height())); // if resize == existing frame, no need to carry on - abort |= !rect.left() && !rect.top() && (rect.size() == videoSize); + abort |= !rect.left() && !rect.top() && (rect.size() == video_dim); if (!abort) { @@ -740,6 +721,8 @@ // locking ok void OpenGLVideo::DisableVideoResize(void) { + OpenGLContextLocker ctx_lock(gl_context); + videoResize = false; videoResizeRect = QRect(0, 0, 0, 0); } @@ -749,19 +732,19 @@ { // FIXME video aspect == display aspect - if ((videoSize.height() <= 0) || (videoSize.width() <= 0)) + if ((video_dim.height() <= 0) || (video_dim.width() <= 0)) return; - float height = visibleRect.height(); + float height = display_visible_rect.height(); float new_top = height - ((float)videoResizeRect.bottom() / - (float)videoSize.height()) * height; + 
(float)video_dim.height()) * height; float new_bottom = height - ((float)videoResizeRect.top() / - (float)videoSize.height()) * height; + (float)video_dim.height()) * height; - left = (((float) videoResizeRect.left() / (float) videoSize.width()) * - visibleRect.width()); - right = (((float) videoResizeRect.right() / (float) videoSize.width()) * - visibleRect.width()); + left = (((float) videoResizeRect.left() / (float) video_dim.width()) * + display_visible_rect.width()); + right = (((float) videoResizeRect.right() / (float) video_dim.width()) * + display_visible_rect.width()); top = new_top; bottom = new_bottom; @@ -773,37 +756,10 @@ if (deinterlacing == hardwareDeinterlacing) return; - VERBOSE(VB_PLAYBACK, LOC + QString("Turning %1 deinterlacing.") - .arg(deinterlacing ? "on" : "off")); - hardwareDeinterlacing = deinterlacing; - glfilt_map_t::iterator it = filters.begin(); - for (; it != filters.end(); it++) - { - it->second->outputBuffer = kFrameBufferObject; - - if ((it->first >= kGLFilterLinearBlendDeint) && - (it->first <= kGLFilterOneFieldDeintDFR) && - !deinterlacing) - { - it->second->outputBuffer = kNoBuffer; - } - } - - glfilt_map_t::reverse_iterator rit = filters.rbegin(); - for (; rit != filters.rend(); rit++) - { - if (rit->second->outputBuffer == kFrameBufferObject) - { - rit->second->outputBuffer = kDefaultBuffer; - break; - } - } - - gl_context->MakeCurrent(true); - SetFiltering(); - gl_context->MakeCurrent(false); + OpenGLContextLocker ctx_lock(gl_context); + CheckResize(hardwareDeinterlacing); } // locking ok @@ -813,24 +769,19 @@ if (inputTextures.empty() || filters.empty()) return; + OpenGLContextLocker ctx_lock(gl_context); + + // enable correct texture type + gl_context->EnableTextures(inputTextures[0]); + vector inputs = inputTextures; QSize inputsize = inputTextureSize; + QSize realsize = GetTextureSize(video_dim); uint numfilters = filters.size(); glfilt_map_t::iterator it; for (it = filters.begin(); it != filters.end(); it++) { - if 
(it->second->rotateFrameBuffers && - !(it->first == kGLFilterYUV2RGB && scan == kScan_Intr2ndField)) - { - Rotate(&(it->second->frameBufferTextures)); - Rotate(&(it->second->frameBuffers)); - } - - // skip disabled filters - if (it->second->outputBuffer == kNoBuffer) - continue; - OpenGLFilterType type = it->first; OpenGLFilter *filter = it->second; @@ -838,37 +789,27 @@ if (!inputUpdated && type == kGLFilterYUV2RGBA) { inputs = filter->frameBufferTextures; - inputsize = videoSize; - continue; - } - - // skip colour conversion for frames already in frame buffer - if (!inputUpdated && (frame == currentFrameNum) && - (type == kGLFilterYUV2RGB) && (frame != 0) && - (!(softwareDeinterlacing && softwareDeinterlacer == "bobdeint"))) - { - inputs = filter->frameBufferTextures; - inputsize = videoSize; + inputsize = realsize; continue; } // texture coordinates - float t_right = (float)videoSize.width(); - float t_bottom = (float)videoSize.height(); + float t_right = (float)video_dim.width(); + float t_bottom = (float)video_dim.height(); float t_top = 0.0f; float t_left = 0.0f; - float trueheight = (float)videoSize.height(); + float trueheight = (float)video_dim.height(); // only apply overscan on last filter if (filter->outputBuffer == kDefaultBuffer) { - t_left = (float)frameRect.left(); - t_right = (float)frameRect.width() + t_left; - t_top = (float)frameRect.top(); - t_bottom = (float)frameRect.height() + t_top; + t_left = (float)video_rect.left(); + t_right = (float)video_rect.width() + t_left; + t_top = (float)video_rect.top(); + t_bottom = (float)video_rect.height() + t_top; } - if (!gl_context->IsFeatureSupported(kGLExtRect) && + if (!textureRects && (inputsize.width() > 0) && (inputsize.height() > 0)) { t_right /= inputsize.width(); @@ -878,36 +819,20 @@ trueheight /= inputsize.height(); } - float line_height = (trueheight / (float)videoSize.height()); - float bob = line_height / 2.0f; - - if (type == kGLFilterBobDeintDFR) + // software bobdeint + if 
((softwareDeinterlacer == "bobdeint") && + softwareDeinterlacing && + (filter->outputBuffer == kDefaultBuffer)) { - if (scan == kScan_Interlaced) - { - t_bottom += bob; - t_top += bob; - } + float bob = (trueheight / (float)video_dim.height()) / 4.0f; if (scan == kScan_Intr2ndField) { - t_bottom -= bob; - t_top -= bob; - } - } - - if (softwareDeinterlacer == "bobdeint" && - softwareDeinterlacing && (type == kGLFilterYUV2RGB || - (type == kGLFilterResize && numfilters == 1))) - { - bob = line_height / 4.0f; - if (scan == kScan_Interlaced) - { t_top /= 2; t_bottom /= 2; t_bottom += bob; t_top += bob; } - if (scan == kScan_Intr2ndField) + if (scan == kScan_Interlaced) { t_top = (trueheight / 2) + (t_top / 2); t_bottom = (trueheight / 2) + (t_bottom / 2); @@ -916,36 +841,42 @@ } } - float t_right_uv = t_right; - float t_top_uv = t_top; - float t_bottom_uv = t_bottom; - float t_left_uv = t_left; - - if (gl_context->IsFeatureSupported(kGLExtRect)) - { - t_right_uv /= 2; - t_top_uv /= 2; - t_bottom_uv /= 2; - t_left_uv /= 2; - } - // vertex coordinates - QRect display = (filter->frameBuffers.empty() || - filter->outputBuffer == kDefaultBuffer) ? - videoRect : frameBufferRect; + QRect display = (filter->frameBuffers.empty() || + filter->outputBuffer == kDefaultBuffer) ? 
+ display_video_rect : frameBufferRect; float vleft = display.left(); float vright = display.right(); float vtop = display.top(); float vbot = display.bottom(); + // hardware bobdeint + if (filter->outputBuffer == kDefaultBuffer && + hardwareDeinterlacing && + hardwareDeinterlacer == "openglbobdeint") + { + float bob = ((float)display.height() / (float)video_dim.height()) + / 2.0f; + if (scan == kScan_Interlaced) + { + vbot -= bob; + vtop -= bob; + } + if (scan == kScan_Intr2ndField) + { + vbot += bob; + vtop += bob; + } + } + // resize for interactive tv if (videoResize && filter->outputBuffer == kDefaultBuffer) CalculateResize(vleft, vtop, vright, vbot); - if (invertVideo && - ((type == kGLFilterYUV2RGB) || (type == kGLFilterYUV2RGBA)) || - ((type == kGLFilterResize) && (numfilters == 1))) + // invert horizontally + if (((type == kGLFilterYUV2RGB) || (type == kGLFilterYUV2RGBA)) || + (filter->outputBuffer == kDefaultBuffer && numfilters == 1)) { float temp = vtop; vtop = vbot; @@ -956,18 +887,15 @@ switch (filter->outputBuffer) { case kDefaultBuffer: - if (frameBuffer) - gl_context->BindFramebuffer(frameBuffer); - // clear the buffer if (viewportControl) { glClear(GL_COLOR_BUFFER_BIT); - SetViewPortPrivate(visibleRect.size()); + gl_context->SetViewPort(display_visible_rect.size()); } else { - SetViewPortPrivate(masterViewportSize); + gl_context->SetViewPort(masterViewportSize); } break; @@ -976,56 +904,67 @@ if (!filter->frameBuffers.empty()) { gl_context->BindFramebuffer(filter->frameBuffers[0]); - SetViewPortPrivate(frameBufferRect.size()); + gl_context->SetViewPort(frameBufferRect.size()); } break; - case kNoBuffer: + default: continue; } // bind correct textures - for (uint i = 0; i < inputs.size(); i++) + uint active_tex = 0; + for (; active_tex < inputs.size(); active_tex++) { - glActiveTexture(GL_TEXTURE0 + i); - glBindTexture(gl_context->GetTextureType(), inputs[i]); + glActiveTexture(GL_TEXTURE0 + active_tex); + glBindTexture(textureType, 
inputs[active_tex]); + } + + if (!referenceTextures.empty() && + hardwareDeinterlacing && + type == kGLFilterYUV2RGB) + { + uint max = inputs.size() + referenceTextures.size(); + uint ref = 0; + for (; active_tex < max; active_tex++, ref++) + { + glActiveTexture(GL_TEXTURE0 + active_tex); + glBindTexture(textureType, referenceTextures[ref]); + } + } + + if (helperTexture && type == kGLFilterBicubic) + { + glActiveTexture(GL_TEXTURE0 + active_tex); + glBindTexture(GL_TEXTURE_1D/*N.B.*/, helperTexture); } // enable fragment program and set any environment variables if ((type != kGLFilterNone) && (type != kGLFilterResize)) { glEnable(GL_FRAGMENT_PROGRAM_ARB); - gl_context->BindFragmentProgram(filter->fragmentProgram); - float field = -line_height; + GLuint program = 0; + + if (type == kGLFilterYUV2RGB) + { + if (hardwareDeinterlacing && + filter->fragmentPrograms.size() == 3) + { + if (scan == kScan_Interlaced) + program = 1; + else if (scan == kScan_Intr2ndField) + program = 2; + } + } + + gl_context->BindFragmentProgram(filter->fragmentPrograms[program]); switch (type) { case kGLFilterYUV2RGB: case kGLFilterYUV2RGBA: if (useColourControl) - { - gl_context->InitFragmentParams( - 0, - pictureAttribs[kPictureAttribute_Brightness], - pictureAttribs[kPictureAttribute_Contrast], - pictureAttribs[kPictureAttribute_Colour], - 0.0f); - } - break; - - case kGLFilterBobDeintDFR: - case kGLFilterOneFieldDeintDFR: - case kGLFilterKernelDeintDFR: - case kGLFilterFieldOrderDFR: - case kGLFilterLinearBlendDeintDFR: - if (scan == kScan_Intr2ndField) - field *= -1; - - case kGLFilterOneFieldDeint: - case kGLFilterKernelDeint: - case kGLFilterLinearBlendDeint: - gl_context->InitFragmentParams( - 0, line_height * 2.0f, field, 0.0f, 0.0f); + gl_context->SetColourParams(); break; case kGLFilterNone: @@ -1041,43 +980,15 @@ // draw quad glBegin(GL_QUADS); glTexCoord2f(t_left, t_top); - if (type == kGLFilterYUV2RGB || type == kGLFilterYUV2RGBA) - { - glMultiTexCoord2f(GL_TEXTURE1, 
t_left_uv, t_top_uv); - glMultiTexCoord2f(GL_TEXTURE2, t_left_uv, t_top_uv); - if (type == kGLFilterYUV2RGBA) - glMultiTexCoord2f(GL_TEXTURE3, t_left_uv, t_top_uv); - } glVertex2f(vleft, vtop); glTexCoord2f(t_right, t_top); - if (type == kGLFilterYUV2RGB || type == kGLFilterYUV2RGBA) - { - glMultiTexCoord2f(GL_TEXTURE1, t_right_uv, t_top_uv); - glMultiTexCoord2f(GL_TEXTURE2, t_right_uv, t_top_uv); - if (type == kGLFilterYUV2RGBA) - glMultiTexCoord2f(GL_TEXTURE3, t_right, t_top); - } glVertex2f(vright, vtop); glTexCoord2f(t_right, t_bottom); - if (type == kGLFilterYUV2RGB || type == kGLFilterYUV2RGBA) - { - glMultiTexCoord2f(GL_TEXTURE1, t_right_uv, t_bottom_uv); - glMultiTexCoord2f(GL_TEXTURE2, t_right_uv, t_bottom_uv); - if (type == kGLFilterYUV2RGBA) - glMultiTexCoord2f(GL_TEXTURE3, t_right, t_bottom); - } glVertex2f(vright, vbot); glTexCoord2f(t_left, t_bottom); - if (type == kGLFilterYUV2RGB || type == kGLFilterYUV2RGBA) - { - glMultiTexCoord2f(GL_TEXTURE1, t_left_uv, t_bottom_uv); - glMultiTexCoord2f(GL_TEXTURE2, t_left_uv, t_bottom_uv); - if (type == kGLFilterYUV2RGBA) - glMultiTexCoord2f(GL_TEXTURE3, t_left_uv, t_bottom); - } glVertex2f(vleft, vbot); glEnd(); @@ -1093,76 +1004,50 @@ } // switch back to default framebuffer - if (filter->outputBuffer != kDefaultBuffer || frameBuffer) + if (filter->outputBuffer != kDefaultBuffer) gl_context->BindFramebuffer(0); inputs = filter->frameBufferTextures; - inputsize = videoSize; + inputsize = realsize; } currentFrameNum = frame; inputUpdated = false; } -void OpenGLVideo::Rotate(vector *target) +void OpenGLVideo::RotateTextures(void) { - if (target->size() < 2) + if (referenceTextures.size() < 2) return; - GLuint tmp = (*target)[target->size() - 1]; - for (uint i = target->size() - 1; i > 0; i--) - (*target)[i] = (*target)[i - 1]; + GLuint tmp = referenceTextures[referenceTextures.size() - 1]; - (*target)[0] = tmp; + for (uint i = referenceTextures.size() - 1; i > 0; i--) + referenceTextures[i] = referenceTextures[i - 
1]; + + referenceTextures[0] = inputTextures[0]; + inputTextures[0] = tmp; } -// locking ok -int OpenGLVideo::SetPictureAttribute( - PictureAttribute attribute, int newValue) +void OpenGLVideo::DeleteTextures(vector *textures) { - if (!useColourControl) - return -1; - - int ret = -1; - switch (attribute) - { - case kPictureAttribute_Brightness: - ret = newValue; - pictureAttribs[attribute] = (newValue * 0.02f) - 0.5f; - break; - case kPictureAttribute_Contrast: - case kPictureAttribute_Colour: - ret = newValue; - pictureAttribs[attribute] = (newValue * 0.02f); - break; - case kPictureAttribute_Hue: // not supported yet... - break; - default: - break; - } - - return ret; -} + if ((*textures).empty()) + return; -PictureAttributeSupported -OpenGLVideo::GetSupportedPictureAttributes(void) const -{ - return (!useColourControl) ? - kPictureAttributeSupported_None : - (PictureAttributeSupported) - (kPictureAttributeSupported_Brightness | - kPictureAttributeSupported_Contrast | - kPictureAttributeSupported_Colour); + for (uint i = 0; i < (*textures).size(); i++) + gl_context->DeleteTexture((*textures)[i]); + (*textures).clear(); } // locking ok -void OpenGLVideo::SetTextureFilters(vector *textures, int filt) +void OpenGLVideo::SetTextureFilters(vector *textures, + int filt, int wrap) { if (textures->empty()) return; for (uint i = 0; i < textures->size(); i++) - gl_context->SetupTextureFilters((*textures)[i], filt); + gl_context->SetTextureFilters((*textures)[i], filt, wrap); } // locking ok @@ -1174,24 +1059,10 @@ ret = kGLFilterYUV2RGB; else if (filter.contains("osd")) ret = kGLFilterYUV2RGBA; - else if (filter.contains("openglkerneldeint")) - ret = kGLFilterKernelDeint; - else if (filter.contains("opengllinearblend")) - ret = kGLFilterLinearBlendDeint; - else if (filter.contains("openglonefield")) - ret = kGLFilterOneFieldDeint; - else if (filter.contains("openglbobdeint")) - ret = kGLFilterBobDeintDFR; - else if (filter.contains("opengldoubleratelinearblend")) - ret = 
kGLFilterLinearBlendDeintDFR; - else if (filter.contains("opengldoubleratekerneldeint")) - ret = kGLFilterKernelDeintDFR; - else if (filter.contains("opengldoublerateonefield")) - ret = kGLFilterOneFieldDeintDFR; - else if (filter.contains("opengldoubleratefieldorder")) - ret = kGLFilterFieldOrderDFR; else if (filter.contains("resize")) ret = kGLFilterResize; + else if (filter.contains("bicubic")) + ret = kGLFilterBicubic; return ret; } @@ -1207,294 +1078,552 @@ return "master"; case kGLFilterYUV2RGBA: return "osd"; - case kGLFilterKernelDeint: - return "openglkerneldeint"; - case kGLFilterLinearBlendDeint: - return "opengllinearblend"; - case kGLFilterOneFieldDeint: - return "openglonefield"; - case kGLFilterBobDeintDFR: - return "openglbobdeint"; - case kGLFilterLinearBlendDeintDFR: - return "opengldoubleratelinearblend"; - case kGLFilterKernelDeintDFR: - return "opengldoubleratekerneldeint"; - case kGLFilterOneFieldDeintDFR: - return "opengldoublerateonefield"; - case kGLFilterFieldOrderDFR: - return "opengldoubleratefieldorder"; case kGLFilterResize: return "resize"; + case kGLFilterBicubic: + return "bicubic"; } return ""; } -static const QString yuv2rgb1a = -"ATTRIB ytex = fragment.texcoord[0];" -"ATTRIB uvtex = fragment.texcoord[1];" -"TEMP res, tmp;"; - -static const QString yuv2rgb1b = -"TEMP alpha;" -"TEX alpha, ytex, texture[3], %1;"; - -static const QString yuv2rgb1c = -"TEX res, ytex, texture[0], %1;" -"TEX tmp.x, uvtex, texture[1], %1;" -"TEX tmp.y, uvtex, texture[2], %1;"; - -static const QString yuv2rgb2 = -"PARAM adj = program.env[0];" -"SUB res, res, 0.5;" -"MAD res, res, adj.yyyy, adj.xxxx;" -"SUB tmp, tmp, { 0.5, 0.5 };" -"MAD tmp, adj.zzzz, tmp, 0.5;"; - -static const QString yuv2rgb3 = -"MAD res, res, 1.164, -0.063;" -"SUB tmp, tmp, { 0.5, 0.5 };" -"MAD res, { 0, -.392, 2.017 }, tmp.xxxw, res;"; - -static const QString yuv2rgb4 = -"MAD result.color, { 1.596, -.813, 0, 0 }, tmp.yyyw, res;"; +static const QString attrib_fast = +"ATTRIB tex = 
fragment.texcoord[0];\n"; -static const QString yuv2rgb5 = -"MAD result.color, { 0, -.813, 1.596, 0 }, tmp.yyyw, res.bgra;"; +static const QString var_alpha = +"TEMP alpha;\n"; -static const QString yuv2rgb6 = -"MOV result.color.a, alpha.a;"; +static const QString tex_alpha = +"TEX alpha, tex, texture[3], %1;\n"; -// locking ok -QString OpenGLVideo::GetProgramString(OpenGLFilterType name) +static const QString tex_fast = +"TEX res, tex, texture[0], %1;\n"; + +static const QString param_colour = +"PARAM adj = program.env[0];\n"; + +static const QString calc_colour_fast = +"SUB res, res, 0.5;\n" +"MAD res, res, adj.zzzy, adj.wwwx;\n"; + +static const QString end_alpha = +"MOV result.color.a, alpha.a;\n"; + +static const QString var_fast = +"TEMP tmp, res;\n"; + +static const QString calc_fast_alpha = +"MOV result.color.a, res.g;\n"; + +static const QString end_fast = +"SUB tmp, res.rbgg, { 0.5, 0.5 };\n" +"MAD res, res.a, 1.164, -0.063;\n" +"MAD res, { 0, -.392, 2.017 }, tmp.xxxw, res;\n" +"MAD result.color, { 1.596, -.813, 0, 0 }, tmp.yyyw, res;\n"; + +static const QString end_fast_alpha = +"SUB tmp, res.rbgg, { 0.5, 0.5 };\n" +"MAD res, res.a, 1.164, -0.063;\n" +"MAD res, { 0, -.392, 2.017 }, tmp.xxxw, res;\n" +"MAD result.color.rgb, { 1.596, -.813, 0, 0 }, tmp.yyyw, res;\n"; + +static const QString var_deint = +"TEMP other, current, mov, prev;\n"; + +static const QString field_calc = +"MUL prev, tex.yyyy, %2;\n" +"FRC prev, prev;\n" +"SUB prev, prev, 0.5;\n"; + +static const QString bobdeint[2] = { +field_calc + +"ADD other, tex, {0.0, %3, 0.0, 0.0};\n" +"TEX other, other, texture[0], %1;\n" +"CMP res, prev, res, other;\n", +field_calc + +"SUB other, tex, {0.0, %3, 0.0, 0.0};\n" +"TEX other, other, texture[0], %1;\n" +"CMP res, prev, other, res;\n" +}; + +static const QString deint_end_top = +"CMP other, mov, current, other;\n" +"CMP res, prev, current, other;\n"; + +static const QString deint_end_bot = +"CMP other, mov, current, other;\n" +"CMP res, prev, other, 
current;\n"; + +static const QString motion_calc = +"ABS mov, mov;\n" +"SUB mov, mov, 0.07;\n"; + +static const QString motion_top = +"SUB mov, prev, current;\n" + motion_calc; + +static const QString motion_bot = +"SUB mov, res, current;\n" + motion_calc; + +static const QString doublerateonefield[2] = { +"TEX current, tex, texture[1], %1;\n" +"TEX prev, tex, texture[2], %1;\n" +"ADD other, tex, {0.0, %3, 0.0, 0.0};\n" +"TEX other, other, texture[1], %1;\n" ++ motion_top + field_calc + deint_end_top, + +"TEX current, tex, texture[1], %1;\n" +"SUB other, tex, {0.0, %3, 0.0, 0.0};\n" +"TEX other, other, texture[1], %1;\n" ++ motion_bot + field_calc + deint_end_bot +}; + +static const QString linearblend[2] = { +"TEX current, tex, texture[1], %1;\n" +"TEX prev, tex, texture[2], %1;\n" +"ADD other, tex, {0.0, %3, 0.0, 0.0};\n" +"TEX other, other, texture[1], %1;\n" +"SUB mov, tex, {0.0, %3, 0.0, 0.0};\n" +"TEX mov, mov, texture[1], %1;\n" +"LRP other, 0.5, other, mov;\n" ++ motion_top + field_calc + deint_end_top, + +"TEX current, tex, texture[1], %1;\n" +"SUB other, tex, {0.0, %3, 0.0, 0.0};\n" +"TEX other, other, texture[1], %1;\n" +"ADD mov, tex, {0.0, %3, 0.0, 0.0};\n" +"TEX mov, mov, texture[1], %1;\n" +"LRP other, 0.5, other, mov;\n" ++ motion_bot + field_calc + deint_end_bot +}; + +static const QString kerneldeint[2] = { +"TEX current, tex, texture[1], %1;\n" +"TEX prev, tex, texture[2], %1;\n" ++ motion_top + +"MUL other, 0.125, prev;\n" +"MAD other, 0.125, current, other;\n" +"ADD prev, tex, {0.0, %3, 0.0, 0.0};\n" +"TEX prev, prev, texture[1], %1;\n" +"MAD other, 0.5, prev, other;\n" +"SUB prev, tex, {0.0, %3, 0.0, 0.0};\n" +"TEX prev, prev, texture[1], %1;\n" +"MAD other, 0.5, prev, other;\n" +"ADD prev, tex, {0.0, %4, 0.0, 0.0};\n" +"TEX mov, prev, texture[1], %1;\n" +"MAD other, -0.0625, mov, other;\n" +"TEX mov, prev, texture[2], %1;\n" +"MAD other, -0.0625, mov, other;\n" +"SUB prev, tex, {0.0, %4, 0.0, 0.0};\n" +"TEX mov, prev, texture[1], %1;\n" +"MAD 
other, -0.0625, mov, other;\n" +"TEX mov, prev, texture[2], %1;\n" +"MAD other, -0.0625, mov, other;\n" ++ field_calc + deint_end_top, + +"TEX current, tex, texture[1], %1;\n" ++ motion_bot + +"MUL other, 0.125, res;\n" +"MAD other, 0.125, current, other;\n" +"ADD prev, tex, {0.0, %3, 0.0, 0.0};\n" +"TEX prev, prev, texture[1], %1;\n" +"MAD other, 0.5, prev, other;\n" +"SUB prev, tex, {0.0, %3, 0.0, 0.0};\n" +"TEX prev, prev, texture[1], %1;\n" +"MAD other, 0.5, prev, other;\n" +"ADD prev, tex, {0.0, %4, 0.0, 0.0};\n" +"TEX mov, prev, texture[1], %1;\n" +"MAD other, -0.0625, mov, other;\n" +"TEX mov, prev, texture[0], %1;\n" +"MAD other, -0.0625, mov, other;\n" +"SUB prev, tex, {0.0, %4, 0.0, 0.0};\n" +"TEX mov, prev, texture[1], %1;\n" +"MAD other, -0.0625, mov, other;\n" +"TEX mov, prev, texture[0], %1;\n" +"MAD other, -0.0625, mov, other;\n" ++ field_calc + deint_end_bot +}; + +static const QString yadif_setup = +"TEMP a,b,c,e,f,g,h,j,k,l;\n" +"TEMP a1,b1,f1,g1,h1,i1,j1,l1,m1,n1;\n" +"ALIAS d1 = f;\n" +"ALIAS k1 = g;\n" +"ALIAS c1 = prev;\n" +"ALIAS e1 = mov;\n" +"ALIAS p0 = res;\n" +"ALIAS p1 = c;\n" +"ALIAS p3 = h;\n" +"ALIAS spred1 = a;\n" +"ALIAS spred2 = b;\n" +"ALIAS spred3 = c;\n" +"ALIAS spred4 = e;\n" +"ALIAS spred5 = f;\n" +"ALIAS sscore = g;\n" +"ALIAS score1 = h;\n" +"ALIAS score2 = j;\n" +"ALIAS score3 = k;\n" +"ALIAS score4 = l;\n" +"ALIAS if1 = a1;\n" +"ALIAS if2 = b1;\n" +"TEMP p2, p4;\n" +"ALIAS diff1 = a;\n" +"ALIAS diff2 = b;\n" +"TEMP diff0;\n"; + +static const QString yadif_spatial_sample = +"ADD tmp, tex, {%5, %3, 0.0, 0.0};\n" +"TEX e1, tmp, texture[1], %1;\n" +"ADD tmp, tmp, {%5, 0.0, 0.0, 0.0};\n" +"TEX f1, tmp, texture[1], %1;\n" +"ADD tmp, tmp, {%5, 0.0, 0.0, 0.0};\n" +"TEX g1, tmp, texture[1], %1;\n" +"SUB tmp, tmp, {0.0, %4, 0.0, 0.0};\n" +"TEX n1, tmp, texture[1], %1;\n" +"SUB tmp, tmp, {%5, 0.0, 0.0, 0.0};\n" +"TEX m1, tmp, texture[1], %1;\n" +"SUB tmp, tmp, {%5, 0.0, 0.0, 0.0};\n" +"TEX l1, tmp, texture[1], %1;\n" + +"SUB tmp, 
tex, {%5, %3, 0.0, 0.0};\n" +"TEX j1, tmp, texture[1], %1;\n" +"SUB tmp, tmp, {%5, 0.0, 0.0, 0.0};\n" +"TEX i1, tmp, texture[1], %1;\n" +"SUB tmp, tmp, {%5, 0.0, 0.0, 0.0};\n" +"TEX h1, tmp, texture[1], %1;\n" +"ADD tmp, tmp, {0.0, %4, 0.0, 0.0};\n" +"TEX a1, tmp, texture[1], %1;\n" +"ADD tmp, tmp, {%5, 0.0, 0.0, 0.0};\n" +"TEX b1, tmp, texture[1], %1;\n" +"ADD tmp, tmp, {%5, 0.0, 0.0, 0.0};\n" +"TEX c1, tmp, texture[1], %1;\n"; + +static const QString yadif_calc = +"LRP p0, 0.5, c, h;\n" +"MOV p1, f;\n" +"LRP p2, 0.5, d, i;\n" +"MOV p3, g;\n" +"LRP p4, 0.5, e, j;\n" + +"SUB diff0, d, i;\n" +"ABS diff0, diff0;\n" +"SUB tmp, a, f;\n" +"ABS tmp, tmp;\n" +"SUB diff1, b, g;\n" +"ABS diff1, diff1;\n" +"LRP diff1, 0.5, diff1, tmp;\n" +"SUB tmp, k, f;\n" +"ABS tmp, tmp;\n" +"SUB diff2, g, l;\n" +"ABS diff2, diff2;\n" +"LRP diff2, 0.5, diff2, tmp;\n" +"MAX diff0, diff0, diff1;\n" +"MAX diff0, diff0, diff2;\n" + +// mode < 2 +"SUB tmp, p0, p1;\n" +"SUB other, p4, p3;\n" +"MIN spred1, tmp, other;\n" +"MAX spred2, tmp, other;\n" +"SUB tmp, p2, p1;\n" +"SUB other, p2, p3;\n" +"MAX spred1, spred1, tmp;\n" +"MAX spred1, spred1, other;\n" +"MIN spred2, spred2, tmp;\n" +"MIN spred2, spred2, other;\n" +"MAX spred1, spred2, -spred1;\n" +"MAX diff0, diff0, spred1;\n" + +// spatial prediction +"LRP spred1, 0.5, d1, k1;\n" +"LRP spred2, 0.5, c1, l1;\n" +"LRP spred3, 0.5, b1, m1;\n" +"LRP spred4, 0.5, e1, j1;\n" +"LRP spred5, 0.5, f1, i1;\n" + +"SUB sscore, c1, j1;\n" +"ABS sscore, sscore;\n" +"SUB tmp, d1, k1;\n" +"ABS tmp, tmp;\n" +"ADD sscore, sscore, tmp;\n" +"SUB tmp, e1, l1;\n" +"ABS tmp, tmp;\n" +"ADD sscore, sscore, tmp;\n" +"SUB sscore, sscore, 1.0;\n" + +"SUB score1, b1, k1;\n" +"ABS score1, score1;\n" +"SUB tmp, c1, l1;\n" +"ABS tmp, tmp;\n" +"ADD score1, score1, tmp;\n" +"SUB tmp, d1, m1;\n" +"ABS tmp, tmp;\n" +"ADD score1, score1, tmp;\n" + +"SUB score2, a1, l1;\n" +"ABS score2, score2;\n" +"SUB tmp, b1, m1;\n" +"ABS tmp, tmp;\n" +"ADD score2, score2, tmp;\n" +"SUB tmp, c1, 
n1;\n" +"ABS tmp, tmp;\n" +"ADD score2, score2, tmp;\n" + +"SUB score3, d1, i1;\n" +"ABS score3, score3;\n" +"SUB tmp, e1, j1;\n" +"ABS tmp, tmp;\n" +"ADD score3, score3, tmp;\n" +"SUB tmp, f1, k1;\n" +"ABS tmp, tmp;\n" +"ADD score3, score3, tmp;\n" + +"SUB score4, e1, h1;\n" +"ABS score4, score4;\n" +"SUB tmp, f1, i1;\n" +"ABS tmp, tmp;\n" +"ADD score4, score4, tmp;\n" +"SUB tmp, g1, j1;\n" +"ABS tmp, tmp;\n" +"ADD score4, score4, tmp;\n" +"SUB if1, sscore, score1;\n" +"SUB if2, score1, score2;\n" +"CMP if2, if1, -1.0, if2;\n" +"CMP spred1, if1, spred1, spred2;\n" +"CMP spred1, if2, spred1, spred3;\n" +"CMP sscore, if1, sscore, score1;\n" +"CMP sscore, if2, sscore, score2;\n" +"SUB if1, sscore, score3;\n" +"SUB if2, score3, score4;\n" +"CMP if2, if1, -1.0, if2;\n" +"CMP spred1, if1, spred1, spred4;\n" +"CMP spred1, if2, spred1, spred5;\n" +"ADD spred4, p2, diff0;\n" +"SUB spred5, p2, diff0;\n" +"SUB if1, spred4, spred1;\n" +"SUB if2, spred1, spred5;\n" +"CMP spred1, if1, spred4, spred1;\n" +"CMP spred1, if2, spred5, spred1;\n"; + +static const QString yadif[2] = { +yadif_setup + +"TEMP d;\n" +"ALIAS i = current;\n" +"TEX current, tex, texture[1], %1;\n" +"TEX d, tex, texture[2], %1;\n" +"ADD tmp, tex, {0.0, %3, 0.0, 0.0};\n" +"TEX a, tmp, texture[2], %1;\n" +"TEX f, tmp, texture[1], %1;\n" +"TEX k, tmp, texture[0], %1;\n" +"ADD tmp, tex, {0.0, %4, 0.0, 0.0};\n" +"TEX c, tmp, texture[2], %1;\n" +"TEX h, tmp, texture[1], %1;\n" +"SUB tmp, tex, {0.0, %3, 0.0, 0.0};\n" +"TEX b, tmp, texture[2], %1;\n" +"TEX g, tmp, texture[1], %1;\n" +"TEX l, tmp, texture[0], %1;\n" +"SUB tmp, tex, {0.0, %4, 0.0, 0.0};\n" +"TEX e, tmp, texture[2], %1;\n" +"TEX j, tmp, texture[1], %1;\n" ++ yadif_spatial_sample ++ yadif_calc ++ field_calc + +"CMP res, prev, current, spred1;\n" +, +yadif_setup + +"TEMP i;\n" +"ALIAS d = current;\n" +"TEX current, tex, texture[1], %1;\n" +"TEX i, tex, texture[0], %1;\n" +"ADD tmp, tex, {0.0, %3, 0.0, 0.0};\n" +"TEX a, tmp, texture[2], %1;\n" +"TEX f, 
tmp, texture[1], %1;\n" +"TEX k, tmp, texture[0], %1;\n" +"ADD tmp, tex, {0.0, %4, 0.0, 0.0};\n" +"TEX c, tmp, texture[1], %1;\n" +"TEX h, tmp, texture[0], %1;\n" +"SUB tmp, tex, {0.0, %3, 0.0, 0.0};\n" +"TEX b, tmp, texture[2], %1;\n" +"TEX g, tmp, texture[1], %1;\n" +"TEX l, tmp, texture[0], %1;\n" +"SUB tmp, tex, {0.0, %4, 0.0, 0.0};\n" +"TEX e, tmp, texture[1], %1;\n" +"TEX j, tmp, texture[0], %1;\n" ++ yadif_spatial_sample ++ yadif_calc ++ field_calc + +"CMP res, prev, spred1, current;\n" +}; + +static const QString bicubic = +"TEMP coord, coord2, cdelta, parmx, parmy, a, b, c, d;\n" +"MAD coord.xy, fragment.texcoord[0], {%6, %7}, {0.5, 0.5};\n" +"TEX parmx, coord.x, texture[1], 1D;\n" +"TEX parmy, coord.y, texture[1], 1D;\n" +"MUL cdelta.xz, parmx.rrgg, {-%5, 0, %5, 0};\n" +"MUL cdelta.yw, parmy.rrgg, {0, -%3, 0, %3};\n" +"ADD coord, fragment.texcoord[0].xyxy, cdelta.xyxw;\n" +"ADD coord2, fragment.texcoord[0].xyxy, cdelta.zyzw;\n" +"TEX a, coord.xyxy, texture[0], 2D;\n" +"TEX b, coord.zwzw, texture[0], 2D;\n" +"TEX c, coord2.xyxy, texture[0], 2D;\n" +"TEX d, coord2.zwzw, texture[0], 2D;\n" +"LRP a, parmy.b, a, b;\n" +"LRP c, parmy.b, c, d;\n" +"LRP result.color, parmx.b, a, c;\n"; + +QString OpenGLVideo::GetProgramString(OpenGLFilterType name, + QString deint, FrameScanType field) { QString ret = "!!ARBfp1.0\n" - "OPTION ARB_precision_hint_fastest;"; + "OPTION ARB_precision_hint_fastest;\n"; switch (name) { case kGLFilterYUV2RGB: - ret = ret + yuv2rgb1a + yuv2rgb1c; - if (useColourControl) - ret += yuv2rgb2; - ret += yuv2rgb3; - ret += frameBuffer ? 
yuv2rgb5 : yuv2rgb4; - break; + { + bool need_tex = true; + QString deint_bit = ""; + if (deint != "") + { + uint tmp_field = 0; + if (field == kScan_Intr2ndField) + tmp_field = 1; + if (deint == "openglbobdeint" || + deint == "openglonefield" || + deint == "opengldoubleratefieldorder") + { + deint_bit = bobdeint[tmp_field]; + } + else if (deint == "opengldoublerateonefield") + { + deint_bit = doublerateonefield[tmp_field]; + if (!tmp_field) { need_tex = false; } + } + else if (deint == "opengllinearblend" || + deint == "opengldoubleratelinearblend") + { + deint_bit = linearblend[tmp_field]; + if (!tmp_field) { need_tex = false; } + } + else if (deint == "openglkerneldeint" || + deint == "opengldoubleratekerneldeint") + { + deint_bit = kerneldeint[tmp_field]; + if (!tmp_field) { need_tex = false; } + } + else if (deint == "openglyadif" || + deint == "opengldoublerateyadif") + { + deint_bit = yadif[tmp_field]; + need_tex = false; + } + else + { + VERBOSE(VB_PLAYBACK, LOC + + "Unrecognised OpenGL deinterlacer"); + } + } - case kGLFilterYUV2RGBA: - ret = ret + yuv2rgb1a + yuv2rgb1b + yuv2rgb1c; - if (useColourControl) - ret += yuv2rgb2; - ret = ret + yuv2rgb3 + yuv2rgb4 + yuv2rgb6; + ret += attrib_fast; + ret += useColourControl ? param_colour : ""; + ret += (deint != "") ? var_deint : ""; + ret += var_fast + (need_tex ? tex_fast : ""); + ret += deint_bit; + ret += useColourControl ? 
calc_colour_fast : ""; + ret += end_fast; + } break; + case kGLFilterYUV2RGBA: - case kGLFilterKernelDeint: - ret += - "ATTRIB tex = fragment.texcoord[0];" - "PARAM off = program.env[0];" - "TEMP sam, pos, cum, cur, field, mov;" - "RCP field, off.x;" - "MUL field, tex.yyyy, field;" - "FRC field, field;" - "SUB field, field, 0.5;" - "TEX sam, tex, texture[1], %1;" - "TEX cur, tex, texture[0], %1;" - "SUB mov, cur, sam;" - "MUL cum, sam, 0.125;" - "MAD cum, cur, 0.125, cum;" - "ABS mov, mov;" - "SUB mov, mov, 0.12;" - "ADD pos, tex, off.wyww;" - "TEX sam, pos, texture[0], %1;" - "MAD cum, sam, 0.5, cum;" - "SUB pos, tex, off.wyww;" - "TEX sam, pos, texture[0], %1;" - "MAD cum, sam, 0.5, cum;" - "MAD pos, off.wyww, 2.0, tex;" - "TEX sam, pos, texture[0], %1;" - "MAD cum, sam, -0.0625, cum;" - "TEX sam, pos, texture[1], %1;" - "MAD cum, sam, -0.0625, cum;" - "MAD pos, off.wyww, -2.0, tex;" - "TEX sam, pos, texture[0], %1;" - "MAD cum, sam, -0.0625, cum;" - "TEX sam, pos, texture[1], %1;" - "MAD cum, sam, -0.0625, cum;" - "CMP cum, mov, cur, cum;" - "CMP result.color, field, cum, cur;"; - break; + ret += attrib_fast; + ret += useColourControl ? param_colour : ""; + ret += var_fast + tex_fast + calc_fast_alpha; + ret += useColourControl ? 
calc_colour_fast : ""; + ret += end_fast_alpha; - case kGLFilterLinearBlendDeintDFR: - ret += - "ATTRIB tex = fragment.texcoord[0];" - "PARAM off = program.env[0];" - "TEMP field, top, bot, current, previous, next, other, mov;" - "TEX next, tex, texture[0], %1;" - "TEX current, tex, texture[1], %1;" - "TEX previous, tex, texture[2], %1;" - "ADD top, tex, off.wyww;" - "TEX other, top, texture[1], %1;" - "SUB top, tex, off.wyww;" - "TEX bot, top, texture[1], %1;" - "LRP other, 0.5, other, bot;" - "RCP field, off.x;" - "MUL field, tex.yyyy, field;" - "FRC field, field;" - "SUB field, field, 0.5;" - "SUB top, current, next;" - "SUB bot, current, previous;" - "CMP mov, field, bot, top;" - "ABS mov, mov;" - "SUB mov, mov, 0.12;" - "CMP other, mov, current, other;" - "CMP top, field, other, current;" - "CMP bot, field, current, other;" - "CMP result.color, off.y, top, bot;"; break; - case kGLFilterOneFieldDeintDFR: - ret += - "ATTRIB tex = fragment.texcoord[0];" - "PARAM off = program.env[0];" - "TEMP field, top, bot, current, previous, next, other, mov;" - "TEX next, tex, texture[0], %1;" - "TEX current, tex, texture[1], %1;" - "TEX previous, tex, texture[2], %1;" - "ADD top, tex, off.wyww;" - "TEX other, top, texture[1], %1;" - "RCP field, off.x;" - "MUL field, tex.yyyy, field;" - "FRC field, field;" - "SUB field, field, 0.5;" - "SUB top, current, next;" - "SUB bot, current, previous;" - "CMP mov, field, bot, top;" - "ABS mov, mov;" - "SUB mov, mov, 0.12;" - "CMP other, mov, current, other;" - "CMP top, field, other, current;" - "CMP bot, field, current, other;" - "CMP result.color, off.y, top, bot;"; + case kGLFilterNone: + case kGLFilterResize: break; - case kGLFilterKernelDeintDFR: - ret += - "ATTRIB tex = fragment.texcoord[0];" - "PARAM off = program.env[0];" - "TEMP sam, pos, bot, top, cur, pre, nex, field, mov;" - "RCP field, off.x;" - "MUL field, tex.yyyy, field;" - "FRC field, field;" - "SUB field, field, 0.5;" - "TEX pre, tex, texture[2], %1;" // -1,0 - "TEX 
cur, tex, texture[1], %1;" // 0,0 - "TEX nex, tex, texture[0], %1;" // +1,0 - "SUB top, nex, cur;" - "SUB bot, pre, cur;" - "CMP mov, field, bot, top;" - "ABS mov, mov;" - "SUB mov, mov, 0.12;" - "MUL bot, pre, 0.125;" // BOT -1,0 - "MAD bot, cur, 0.125, bot;" // BOT +1,0 - "MUL top, cur, 0.125;" // TOP -1,0 - "MAD top, nex, 0.125, top;" // TOP +1,0 - "ADD pos, tex, off.wyww;" - "TEX sam, pos, texture[1], %1;" // 0,+1 - "MAD bot, sam, 0.5, bot;" // BOT 0,+1 - "MAD top, sam, 0.5, top;" // TOP 0,+1 - "SUB pos, tex, off.wyww;" - "TEX sam, pos, texture[1], %1;" // 0,-1 - "MAD bot, sam, 0.5, bot;" // BOT 0,-1 - "MAD top, sam, 0.5, top;" // TOP 0,-1 - "MAD pos, off.wyww, 2.0, tex;" - "TEX sam, pos, texture[1], %1;" // 0,+2 - "MAD bot, sam, -0.0625, bot;" // BOT +1,+2 - "MAD top, sam, -0.0625, top;" // TOP -1,+2 - "TEX sam, pos, texture[2], %1;" // -1,+2 - "MAD bot, sam, -0.0625, bot;" // BOT -1,+2 - "TEX sam, pos, texture[0], %1;" // +1,+2 - "MAD top, sam, -0.0625, top;" // TOP +1,+2 - "MAD pos, off.wyww, -2.0, tex;" - "TEX sam, pos, texture[1], %1;" // +1,-2 - "MAD bot, sam, -0.0625, bot;" // BOT +1,-2 - "MAD top, sam, -0.0625, top;" // TOP -1,-2 - "TEX sam, pos, texture[2], %1;" // -1, -2 row - "MAD bot, sam, -0.0625, bot;" // BOT -1,-2 - "TEX sam, pos, texture[0], %1;" // +1,-2 - "MAD top, sam, -0.0625, top;" // TOP +1,-2 - "CMP top, mov, cur, top;" - "CMP bot, mov, cur, bot;" - "CMP top, field, top, cur;" - "CMP bot, field, cur, bot;" - "CMP result.color, off.y, top, bot;"; + case kGLFilterBicubic: + + ret += bicubic; break; - case kGLFilterBobDeintDFR: - case kGLFilterOneFieldDeint: - ret += - "ATTRIB tex = fragment.texcoord[0];" - "PARAM off = program.env[0];" - "TEMP field, top, bottom, current, other;" - "TEX current, tex, texture[0], %1;" - "RCP field, off.x;" - "MUL field, tex.yyyy, field;" - "FRC field, field;" - "SUB field, field, 0.5;" - "ADD top, tex, off.wyww;" - "TEX other, top, texture[0], %1;" - "CMP top, field, other, current;" - "CMP bottom, field, 
current, other;" - "CMP result.color, off.y, top, bottom;"; + default: + VERBOSE(VB_PLAYBACK, LOC_ERR + "Unknown fragment program."); break; + } - case kGLFilterLinearBlendDeint: - ret += - "ATTRIB tex = fragment.texcoord[0];" - "PARAM off = program.env[0];" - "TEMP mov, field, cur, pre, pos;" - "RCP field, off.x;" - "MUL field, tex.yyyy, field;" - "FRC field, field;" - "SUB field, field, 0.5;" - "TEX cur, tex, texture[0], %1;" - "TEX pre, tex, texture[1], %1;" - "SUB mov, cur, pre;" - "ABS mov, mov;" - "SUB mov, mov, 0.12;" - "ADD pos, tex, off.wyww;" - "TEX pre, pos, texture[0], %1;" - "SUB pos, tex, off.wyww;" - "TEX pos, pos, texture[0], %1;" - "LRP pre, 0.5, pos, pre;" - "CMP pre, field, pre, cur;" - "CMP result.color, mov, cur, pre;"; - break; + QString temp = textureRects ? "RECT" : "2D"; + ret.replace("%1", temp); - case kGLFilterFieldOrderDFR: - ret += - "ATTRIB tex = fragment.texcoord[0];" - "PARAM off = program.env[0];" - "TEMP field, cur, pre, bot;" - "TEX cur, tex, texture[0], %1;" - "TEX pre, tex, texture[1], %1;" - "RCP field, off.x;" - "MUL field, tex.yyyy, field;" - "FRC field, field;" - "SUB field, field, 0.5;" - "CMP bot, off.y, pre, cur;" - "CMP result.color, field, bot, cur;"; + float lineHeight = 1.0f; + float colWidth = 1.0f; + QSize fb_size = GetTextureSize(video_dim); - break; + if (!textureRects && + (inputTextureSize.height() > 0)) + { + lineHeight /= inputTextureSize.height(); + colWidth /= inputTextureSize.width(); + } - case kGLFilterNone: - case kGLFilterResize: - break; + float fieldSize = 1.0f / (lineHeight * 2.0); - default: - VERBOSE(VB_PLAYBACK, LOC_ERR + "Unknown fragment program."); - break; + ret.replace("%2", temp.setNum(fieldSize, 'f', 8)); + ret.replace("%3", temp.setNum(lineHeight, 'f', 8)); + ret.replace("%4", temp.setNum(lineHeight * 2.0, 'f', 8)); + ret.replace("%5", temp.setNum(colWidth, 'f', 8)); + ret.replace("%6", temp.setNum((float)fb_size.width(), 'f', 1)); + ret.replace("%7", temp.setNum((float)fb_size.height(), 
'f', 1)); + + ret += "END"; + + VERBOSE(VB_PLAYBACK, LOC + QString("Created %1 fragment program %2") + .arg(FilterToString(name)).arg(deint)); + + return ret; +} + +uint OpenGLVideo::ParseOptions(QString options) +{ + uint ret = kGLMaxFeat - 1; + + QStringList list = QStringList::split(",", options); + + if (list.empty()) + return ret; + + for (QStringList::Iterator i = list.begin(); + i != list.end(); ++i) + { + QString name = (*i).section('=', 0, 0); + QString opts = (*i).section('=', 1); + + if (name == "opengloptions") + { + if (opts.contains("nofinish")) + ret -= kGLFinish; + if (opts.contains("nofence")) + ret -= kGLNVFence; + if (opts.contains("nopbo")) + ret -= kGLExtPBufObj; + if (opts.contains("nopbuf")) + ret -= kGLXPBuffer; + if (opts.contains("nofbo")) + ret -= kGLExtFBufObj; + if (opts.contains("nofrag")) + ret -= kGLExtFragProg; + if (opts.contains("norect")) + ret -= kGLExtRect; + return ret; + } } - return ret + "END"; + return ret; } + diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/openglvideo.h mythtv/libs/libmythtv/openglvideo.h --- mythtv.ori/libs/libmythtv/openglvideo.h 2009-01-23 16:38:27.000000000 +1100 +++ mythtv/libs/libmythtv/openglvideo.h 2009-02-10 14:00:22.000000000 +1100 @@ -9,6 +9,7 @@ #include #include "videooutbase.h" +#include "videoouttypes.h" enum OpenGLFilterType { @@ -18,25 +19,13 @@ kGLFilterYUV2RGB, kGLFilterYUV2RGBA, - // Frame rate preserving deinterlacers - kGLFilterLinearBlendDeint, - kGLFilterKernelDeint, - kGLFilterOneFieldDeint, - - // Frame rate doubling deinterlacers - kGLFilterBobDeintDFR, - kGLFilterLinearBlendDeintDFR, - kGLFilterKernelDeintDFR, - kGLFilterFieldOrderDFR, - kGLFilterOneFieldDeintDFR, - // Frame scaling/resizing filters kGLFilterResize, + kGLFilterBicubic, }; enum DisplayBuffer { - kNoBuffer = 0, // disable filter kDefaultBuffer, kFrameBufferObject }; @@ -54,28 +43,25 @@ OpenGLVideo(); ~OpenGLVideo(); - bool Init(OpenGLContext *glcontext, bool colour_control, bool onscreen, - QSize video_size, 
QRect visible_rect, - QRect video_rect, QRect frame_rect, - bool viewport_control, bool osd = FALSE); - bool ReInit(OpenGLContext *gl, bool colour_control, bool onscreen, - QSize video_size, QRect visible_rect, - QRect video_rect, QRect frame_rect, - bool viewport_control, bool osd = FALSE); + bool Init(OpenGLContext *glcontext, bool colour_control, + QSize videoDim, QRect displayVisibleRect, + QRect displayVideoRect, QRect videoRect, + bool viewport_control, QString options, bool osd = FALSE); - void UpdateInputFrame(const VideoFrame *frame); + void UpdateInputFrame(const VideoFrame *frame, bool soft_bob = FALSE); void UpdateInput(const unsigned char *buf, const int *offsets, - uint texture_index, int format, QSize size); + int format, QSize size, + const unsigned char *alpha); bool AddFilter(const QString &filter) { return AddFilter(StringToFilter(filter)); } bool RemoveFilter(const QString &filter) { return RemoveFilter(StringToFilter(filter)); } - bool AddDeinterlacer(const QString &filter); + bool AddDeinterlacer(const QString &deinterlacer); void SetDeinterlacing(bool deinterlacing); QString GetDeinterlacer(void) const - { return FilterToString(GetDeintFilter()); }; + { return hardwareDeinterlacer; } void SetSoftwareDeinterlacer(const QString &filter) { softwareDeinterlacer = QDeepCopy(filter); }; @@ -84,67 +70,75 @@ void SetMasterViewport(QSize size) { masterViewportSize = size; } QSize GetViewPort(void) const { return viewportSize; } - void SetVideoRect(const QRect &vidrect, const QRect &framerect) - { videoRect = vidrect; frameRect = framerect;} - QSize GetVideoSize(void) const { return videoSize; } + void SetVideoRect(const QRect &dispvidrect, const QRect &vidrect) + { display_video_rect = dispvidrect; video_rect = vidrect;} + QSize GetVideoSize(void) const { return actual_video_dim;} void SetVideoResize(const QRect &rect); void DisableVideoResize(void); - int SetPictureAttribute(PictureAttribute attributeType, int newValue); - PictureAttributeSupported 
GetSupportedPictureAttributes(void) const; private: void Teardown(void); void SetViewPort(const QSize &new_viewport_size); - void SetViewPortPrivate(const QSize &new_viewport_size); bool AddFilter(OpenGLFilterType filter); bool RemoveFilter(OpenGLFilterType filter); + void CheckResize(bool deinterlacing); bool OptimiseFilters(void); - OpenGLFilterType GetDeintFilter(void) const; - bool AddFrameBuffer(uint &framebuffer, uint &texture, QSize size); - uint AddFragmentProgram(OpenGLFilterType name); - uint CreateVideoTexture(QSize size, QSize &tex_size); - QString GetProgramString(OpenGLFilterType filter); + bool AddFrameBuffer(uint &framebuffer, QSize fb_size, + uint &texture, QSize vid_size); + uint AddFragmentProgram(OpenGLFilterType name, + QString deint = QString::null, + FrameScanType field = kScan_Progressive); + uint CreateVideoTexture(QSize size, QSize &tex_size, + bool use_pbo = false); + QString GetProgramString(OpenGLFilterType filter, + QString deint = QString::null, + FrameScanType field = kScan_Progressive); void CalculateResize(float &left, float &top, float &right, float &bottom); static QString FilterToString(OpenGLFilterType filter); static OpenGLFilterType StringToFilter(const QString &filter); void ShutDownYUV2RGB(void); - void SetViewPort(bool last_stage); - void InitOpenGL(void); QSize GetTextureSize(const QSize &size); void SetFiltering(void); - void Rotate(vector *target); - void SetTextureFilters(vector *textures, int filt); + void RotateTextures(void); + void SetTextureFilters(vector *textures, int filt, int clamp); + void DeleteTextures(vector *textures); + void TearDownDeinterlacer(void); + uint ParseOptions(QString options); OpenGLContext *gl_context; - QSize videoSize; + QSize video_dim; + QSize actual_video_dim; QSize viewportSize; QSize masterViewportSize; - QRect visibleRect; - QRect videoRect; - QRect frameRect; + QRect display_visible_rect; + QRect display_video_rect; + QRect video_rect; QRect frameBufferRect; - bool invertVideo; 
QString softwareDeinterlacer; + QString hardwareDeinterlacer; bool hardwareDeinterlacing; bool useColourControl; bool viewportControl; - uint frameBuffer; - uint frameBufferTexture; + vector referenceTextures; vector inputTextures; QSize inputTextureSize; glfilt_map_t filters; long long currentFrameNum; bool inputUpdated; + bool textureRects; + uint textureType; + uint helperTexture; + OpenGLFilterType defaultUpsize; - QSize convertSize; - unsigned char *convertBuf; + QSize convertSize; + unsigned char *convertBuf; - bool videoResize; - QRect videoResizeRect; + bool videoResize; + QRect videoResizeRect; - float pictureAttribs[kPictureAttribute_MAX]; + uint gl_features; }; #else // if !USING_OPENGL_VIDEO @@ -155,16 +149,13 @@ OpenGLVideo() { } ~OpenGLVideo() { } - bool Init(OpenGLContext*, bool, bool, QSize, QRect, - QRect, QRect, bool, bool osd = false) - { (void) osd; return false; } - - bool ReInit(OpenGLContext*, bool, bool, QSize, QRect, - QRect, QRect, bool, bool osd = false) + bool Init(OpenGLContext*, bool, QSize, QRect, + QRect, QRect, bool, QString, bool osd = false) { (void) osd; return false; } void UpdateInputFrame(const VideoFrame*) { } - void UpdateInput(const unsigned char*, const int*, uint, int, QSize) { } + void UpdateInput(const unsigned char*, const int*, + int, QSize, unsigned char* = NULL) { } bool AddFilter(const QString&) { return false; } bool RemoveFilter(const QString&) { return false; } @@ -182,9 +173,6 @@ QSize GetVideoSize(void) const { return QSize(0,0); } void SetVideoResize(const QRect&) { } void DisableVideoResize(void) { } - int SetPictureAttribute(PictureAttribute, int) { return -1; } - PictureAttributeSupported GetSupportedPictureAttributes(void) const - { return kPictureAttributeSupported_None; } }; #endif // !USING_OPENGL_VIDEO diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/tv_play.cpp mythtv/libs/libmythtv/tv_play.cpp --- mythtv.ori/libs/libmythtv/tv_play.cpp 2009-01-22 12:22:03.000000000 +1100 +++ 
mythtv/libs/libmythtv/tv_play.cpp 2009-02-11 20:35:10.000000000 +1100 @@ -2462,6 +2462,7 @@ return true; } case QEvent::Paint: + case QEvent::Show: { if (nvp) nvp->ExposeEvent(); diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/util-opengl.cpp mythtv/libs/libmythtv/util-opengl.cpp --- mythtv.ori/libs/libmythtv/util-opengl.cpp 2009-01-23 16:38:27.000000000 +1100 +++ mythtv/libs/libmythtv/util-opengl.cpp 2009-02-10 14:00:22.000000000 +1100 @@ -3,6 +3,19 @@ #include "util-opengl.h" #include "frame.h" +#ifdef MMX +extern "C" { +#include "libavcodec/i386/mmx.h" +} +#endif + +PFNGLMAPBUFFERPROC gMythGLMapBufferARB = NULL; +PFNGLBINDBUFFERARBPROC gMythGLBindBufferARB = NULL; +PFNGLGENBUFFERSARBPROC gMythGLGenBuffersARB = NULL; +PFNGLBUFFERDATAARBPROC gMythGLBufferDataARB = NULL; +PFNGLUNMAPBUFFERARBPROC gMythGLUnmapBufferARB = NULL; +PFNGLDELETEBUFFERSARBPROC gMythGLDeleteBuffersARB = NULL; + PFNGLGENPROGRAMSARBPROC gMythGLGenProgramsARB = NULL; PFNGLBINDPROGRAMARBPROC gMythGLBindProgramARB = NULL; PFNGLPROGRAMSTRINGARBPROC gMythGLProgramStringARB = NULL; @@ -19,6 +32,11 @@ PFNGLXGETVIDEOSYNCSGIPROC gMythGLXGetVideoSyncSGI = NULL; PFNGLXWAITVIDEOSYNCSGIPROC gMythGLXWaitVideoSyncSGI = NULL; +PFNGLGENFENCESNVPROC gMythGLGenFencesNV = NULL; +PFNGLDELETEFENCESNVPROC gMythGLDeleteFencesNV = NULL; +PFNGLSETFENCENVPROC gMythGLSetFenceNV = NULL; +PFNGLFINISHFENCENVPROC gMythGLFinishFenceNV = NULL; + bool init_opengl(void) { static bool is_initialized = false; @@ -30,6 +48,19 @@ is_initialized = true; + gMythGLMapBufferARB = (PFNGLMAPBUFFERPROC) + get_gl_proc_address("glMapBufferARB"); + gMythGLBindBufferARB = (PFNGLBINDBUFFERARBPROC) + get_gl_proc_address("glBindBufferARB"); + gMythGLGenBuffersARB = (PFNGLGENBUFFERSARBPROC) + get_gl_proc_address("glGenBuffersARB"); + gMythGLBufferDataARB = (PFNGLBUFFERDATAARBPROC) + get_gl_proc_address("glBufferDataARB"); + gMythGLUnmapBufferARB = (PFNGLUNMAPBUFFERARBPROC) + get_gl_proc_address("glUnmapBufferARB"); + gMythGLDeleteBuffersARB = 
(PFNGLDELETEBUFFERSARBPROC) + get_gl_proc_address("glDeleteBuffersARB"); + gMythGLGenProgramsARB = (PFNGLGENPROGRAMSARBPROC) get_gl_proc_address("glGenProgramsARB"); gMythGLBindProgramARB = (PFNGLBINDPROGRAMARBPROC) @@ -60,6 +91,15 @@ gMythGLXWaitVideoSyncSGI = (PFNGLXWAITVIDEOSYNCSGIPROC) get_gl_proc_address("glXWaitVideoSyncSGI"); + gMythGLGenFencesNV = (PFNGLGENFENCESNVPROC) + get_gl_proc_address("glGenFencesNV"); + gMythGLDeleteFencesNV = (PFNGLDELETEFENCESNVPROC) + get_gl_proc_address("glDeleteFencesNV"); + gMythGLSetFenceNV = (PFNGLSETFENCENVPROC) + get_gl_proc_address("glSetFenceNV"); + gMythGLFinishFenceNV = (PFNGLFINISHFENCENVPROC) + get_gl_proc_address("glFinishFenceNV"); + return true; } @@ -101,7 +141,7 @@ if (!ret) return false; - + gl_minor=2; static_major = major = gl_major; static_minor = minor = gl_minor; static_ret = true; @@ -210,8 +250,7 @@ Window get_gl_window(Display *XJ_disp, Window XJ_curwin, XVisualInfo *visInfo, - const QSize &window_size, - bool map_window) + const QRect &window_rect) { X11L; @@ -220,11 +259,11 @@ XJ_disp, XJ_curwin, visInfo->visual, AllocNone); Window gl_window = XCreateWindow( - XJ_disp, XJ_curwin, 0, 0, window_size.width(), window_size.height(), 0, + XJ_disp, XJ_curwin, window_rect.x(), window_rect.y(), + window_rect.width(), window_rect.height(), 0, visInfo->depth, InputOutput, visInfo->visual, CWColormap, &attributes); - if (map_window) - XMapWindow(XJ_disp, gl_window); + XMapWindow(XJ_disp, gl_window); XFree(visInfo); @@ -267,41 +306,6 @@ return glx_window; } -void copy_pixels_to_texture(const unsigned char *buf, - int buffer_format, - const QSize &buffer_size, - int texture, - int texture_type) -{ - glBindTexture(texture_type, texture); - - uint format; - switch (buffer_format) - { - case FMT_YV12: - format = GL_LUMINANCE; - break; - case FMT_RGB24: - format = GL_RGB; - break; - case FMT_RGBA32: - format = GL_RGBA; - break; - case FMT_ALPHA: - format = GL_ALPHA; - break; - default: - return; - } - - 
glTexSubImage2D( - texture_type, - 0, 0, 0, - buffer_size.width(), buffer_size.height(), - format, GL_UNSIGNED_BYTE, - buf); -} - __GLXextFuncPtr get_gl_proc_address(const QString &procName) { __GLXextFuncPtr ret = NULL; @@ -371,3 +375,596 @@ return gMythGLXGetVideoSyncSGI && gMythGLXWaitVideoSyncSGI; } + +bool has_gl_pixelbuffer_object_support(const QString &ext) +{ + init_opengl(); + + if (!ext.contains("GL_ARB_pixel_buffer_object")) + return false; + + return (gMythGLMapBufferARB && + gMythGLBindBufferARB && + gMythGLGenBuffersARB && + gMythGLDeleteBuffersARB && + gMythGLBufferDataARB && + gMythGLUnmapBufferARB); +} + +bool has_gl_nvfence_support(const QString &ext) +{ + init_opengl(); + + if (!ext.contains("GL_NV_fence")) + return false; + + return (gMythGLGenFencesNV && + gMythGLDeleteFencesNV && + gMythGLSetFenceNV && + gMythGLFinishFenceNV); +} + +#ifdef MMX +static inline void mmx_pack_alpha_high(uint8_t *a1, uint8_t *a2, + uint8_t *y1, uint8_t *y2) +{ + movq_m2r (*a1, mm4); + punpckhbw_m2r (*y1, mm4); + movq_m2r (*a2, mm7); + punpckhbw_m2r (*y2, mm7); +} + +static inline void mmx_pack_alpha_low(uint8_t *a1, uint8_t *a2, + uint8_t *y1, uint8_t *y2) +{ + movq_m2r (*a1, mm4); + punpcklbw_m2r (*y1, mm4); + movq_m2r (*a2, mm7); + punpcklbw_m2r (*y2, mm7); +} + +static mmx_t mmx_1s = {0xffffffffffffffffLL}; + +static inline void mmx_pack_alpha1s_high(uint8_t *y1, uint8_t *y2) +{ + movq_m2r (mmx_1s, mm4); + punpckhbw_m2r (*y1, mm4); + movq_m2r (mmx_1s, mm7); + punpckhbw_m2r (*y2, mm7); +} + +static inline void mmx_pack_alpha1s_low(uint8_t *y1, uint8_t *y2) +{ + movq_m2r (mmx_1s, mm4); + punpcklbw_m2r (*y1, mm4); + movq_m2r (mmx_1s, mm7); + punpcklbw_m2r (*y2, mm7); +} + +static inline void mmx_pack_middle(uint8_t *dest1, uint8_t *dest2) +{ + movq_r2r (mm3, mm5); + punpcklbw_r2r (mm2, mm5); + + movq_r2r (mm5, mm6); + punpcklbw_r2r (mm4, mm6); + movq_r2m (mm6, *(dest1)); + + movq_r2r (mm5, mm6); + punpckhbw_r2r (mm4, mm6); + movq_r2m (mm6, *(dest1 + 8)); + + 
movq_r2r (mm5, mm6); + punpcklbw_r2r (mm7, mm6); + movq_r2m (mm6, *(dest2)); + + movq_r2r (mm5, mm6); + punpckhbw_r2r (mm7, mm6); + movq_r2m (mm6, *(dest2 + 8)); +} + +static inline void mmx_pack_end(uint8_t *dest1, uint8_t *dest2) +{ + punpckhbw_r2r (mm2, mm3); + + movq_r2r (mm3, mm6); + punpcklbw_r2r (mm4, mm6); + movq_r2m (mm6, *(dest1 + 16)); + + movq_r2r (mm3, mm6); + punpckhbw_r2r (mm4, mm6); + movq_r2m (mm6, *(dest1 + 24)); + + movq_r2r (mm3, mm6); + punpcklbw_r2r (mm7, mm6); + movq_r2m (mm6, *(dest2 + 16)); + + punpckhbw_r2r (mm7, mm3); + movq_r2m (mm3, *(dest2 + 24)); +} + +static inline void mmx_pack_easy(uint8_t *dest, uint8_t *y) +{ + movq_m2r (mmx_1s, mm4); + punpcklbw_m2r (*y, mm4); + + movq_r2r (mm3, mm5); + punpcklbw_r2r (mm2, mm5); + + movq_r2r (mm5, mm6); + punpcklbw_r2r (mm4, mm6); + movq_r2m (mm6, *(dest)); + + movq_r2r (mm5, mm6); + punpckhbw_r2r (mm4, mm6); + movq_r2m (mm6, *(dest + 8)); + + movq_m2r (mmx_1s, mm4); + punpckhbw_m2r (*y, mm4); + + punpckhbw_r2r (mm2, mm3); + + movq_r2r (mm3, mm6); + punpcklbw_r2r (mm4, mm6); + movq_r2m (mm6, *(dest + 16)); + + punpckhbw_r2r (mm4, mm3); + movq_r2m (mm3, *(dest + 24)); +} + +static mmx_t mmx_0s = {0x0000000000000000LL}; +static mmx_t round = {0x0002000200020002LL}; + +static inline void mmx_interp_start(uint8_t *left, uint8_t *right) +{ + movd_m2r (*left, mm5); + punpcklbw_m2r (mmx_0s, mm5); + + movq_r2r (mm5, mm4); + paddw_r2r (mm4, mm4); + paddw_r2r (mm5, mm4); + paddw_m2r (round, mm4); + + movd_m2r (*right, mm5); + punpcklbw_m2r (mmx_0s, mm5); + paddw_r2r (mm5, mm4); + + psrlw_i2r (2, mm4); +} + +static inline void mmx_interp_endu(void) +{ + movq_r2r (mm4, mm2); + psllw_i2r (8, mm2); + paddb_r2r (mm4, mm2); +} + +static inline void mmx_interp_endv(void) +{ + movq_r2r (mm4, mm3); + psllw_i2r (8, mm3); + paddb_r2r (mm4, mm3); +} + +static inline void mmx_pack_chroma(uint8_t *u, uint8_t *v) +{ + movd_m2r (*u, mm2); + movd_m2r (*v, mm3); + punpcklbw_r2r (mm2, mm2); + punpcklbw_r2r (mm3, mm3); +} 
+#endif // MMX + +static inline void c_interp(uint8_t *dest, uint8_t *a, uint8_t *b, + uint8_t *c, uint8_t *d) +{ + unsigned int tmp = (unsigned int) *a; + tmp *= 3; + tmp += 2; + tmp += (unsigned int) *c; + dest[0] = (uint8_t) (tmp >> 2); + + tmp = (unsigned int) *b; + tmp *= 3; + tmp += 2; + tmp += (unsigned int) *d; + dest[1] = (uint8_t) (tmp >> 2); + + tmp = (unsigned int) *c; + tmp *= 3; + tmp += 2; + tmp += (unsigned int) *a; + dest[2] = (uint8_t) (tmp >> 2); + + tmp = (unsigned int) *d; + tmp *= 3; + tmp += 2; + tmp += (unsigned int) *b; + dest[3] = (uint8_t) (tmp >> 2); +} + +void pack_yv12alpha(const unsigned char *source, + const unsigned char *dest, + const int *offsets, + const int *pitches, + const QSize size, + const unsigned char *alpha) +{ + const int width = size.width(); + const int height = size.height(); + + if (height % 2 || width % 2) + return; + +#ifdef MMX + int residual = width % 8; + int mmx_width = width - residual; + int c_start_w = mmx_width; +#else + int residual = 0; + int mmx_width = width; + int c_start_w = 0; +#endif + + uint bgra_width = width << 2; + uint chroma_width = width >> 1; + + uint y_extra = (pitches[0] << 1) - width + residual; + uint u_extra = pitches[1] - chroma_width + (residual >> 1); + uint v_extra = pitches[2] - chroma_width + (residual >> 1); + uint d_extra = bgra_width + (residual << 2); + + uint8_t *ypt_1 = (uint8_t *)source + offsets[0]; + uint8_t *ypt_2 = ypt_1 + pitches[0]; + uint8_t *upt = (uint8_t *)source + offsets[1]; + uint8_t *vpt = (uint8_t *)source + offsets[2]; + uint8_t *dst_1 = (uint8_t *) dest; + uint8_t *dst_2 = dst_1 + bgra_width; + + if (alpha) + { + uint8_t *alpha_1 = (uint8_t *) alpha; + uint8_t *alpha_2 = alpha_1 + width; + uint a_extra = width + residual; + +#ifdef MMX + for (int row = 0; row < height; row += 2) + { + for (int col = 0; col < mmx_width; col += 8) + { + mmx_pack_chroma(upt, vpt); + mmx_pack_alpha_low(alpha_1, alpha_2, ypt_1, ypt_2); + mmx_pack_middle(dst_1, dst_2); + 
mmx_pack_alpha_high(alpha_1, alpha_2, ypt_1, ypt_2); + mmx_pack_end(dst_1, dst_2); + + dst_1 += 32; dst_2 += 32; + alpha_1 += 8; alpha_2 += 8; + ypt_1 += 8; ypt_2 += 8; + upt += 4; vpt += 4; + } + + ypt_1 += y_extra; ypt_2 += y_extra; + upt += u_extra; vpt += v_extra; + dst_1 += d_extra; dst_2 += d_extra; + alpha_1 += a_extra; alpha_2 += a_extra; + } + + emms(); + + if (residual) + { + y_extra = (pitches[0] << 1) - width + mmx_width; + u_extra = pitches[1] - chroma_width + (mmx_width >> 1); + v_extra = pitches[2] - chroma_width + (mmx_width >> 1); + d_extra = bgra_width + (mmx_width << 2); + + ypt_1 = (uint8_t *)source + offsets[0] + mmx_width; + ypt_2 = ypt_1 + pitches[0]; + upt = (uint8_t *)source + offsets[1] + (mmx_width>>1); + vpt = (uint8_t *)source + offsets[2] + (mmx_width>>1); + dst_1 = (uint8_t *) dest + (mmx_width << 2); + dst_2 = dst_1 + bgra_width; + + alpha_1 = (uint8_t *) alpha + mmx_width; + alpha_2 = alpha_1 + width; + a_extra = width + mmx_width; + } + else + { + return; + } +#endif //MMX + + for (int row = 0; row < height; row += 2) + { + for (int col = c_start_w; col < width; col += 2) + { + *(dst_1++) = *vpt; *(dst_2++) = *vpt; + *(dst_1++) = *(alpha_1++); + *(dst_2++) = *(alpha_2++); + *(dst_1++) = *upt; *(dst_2++) = *upt; + *(dst_1++) = *(ypt_1++); + *(dst_2++) = *(ypt_2++); + + *(dst_1++) = *vpt; *(dst_2++) = *(vpt++); + *(dst_1++) = *(alpha_1++); + *(dst_2++) = *(alpha_2++); + *(dst_1++) = *upt; *(dst_2++) = *(upt++); + *(dst_1++) = *(ypt_1++); + *(dst_2++) = *(ypt_2++); + } + + ypt_1 += y_extra; ypt_2 += y_extra; + upt += u_extra; vpt += v_extra; + alpha_1 += a_extra; alpha_2 += a_extra; + dst_1 += d_extra; dst_2 += d_extra; + } + } + else + { + +#ifdef MMX + for (int row = 0; row < height; row += 2) + { + for (int col = 0; col < mmx_width; col += 8) + { + mmx_pack_chroma(upt, vpt); + mmx_pack_alpha1s_low(ypt_1, ypt_2); + mmx_pack_middle(dst_1, dst_2); + mmx_pack_alpha1s_high(ypt_1, ypt_2); + mmx_pack_end(dst_1, dst_2); + + dst_1 += 32; 
dst_2 += 32; + ypt_1 += 8; ypt_2 += 8; + upt += 4; vpt += 4; + + } + ypt_1 += y_extra; ypt_2 += y_extra; + upt += u_extra; vpt += v_extra; + dst_1 += d_extra; dst_2 += d_extra; + } + + emms(); + + if (residual) + { + y_extra = (pitches[0] << 1) - width + mmx_width; + u_extra = pitches[1] - chroma_width + (mmx_width >> 1); + v_extra = pitches[2] - chroma_width + (mmx_width >> 1); + d_extra = bgra_width + (mmx_width << 2); + + ypt_1 = (uint8_t *)source + offsets[0] + mmx_width; + ypt_2 = ypt_1 + pitches[0]; + upt = (uint8_t *)source + offsets[1] + (mmx_width>>1); + vpt = (uint8_t *)source + offsets[2] + (mmx_width>>1); + dst_1 = (uint8_t *) dest + (mmx_width << 2); + dst_2 = dst_1 + bgra_width; + } + else + { + return; + } +#endif //MMX + + for (int row = 0; row < height; row += 2) + { + for (int col = c_start_w; col < width; col += 2) + { + *(dst_1++) = *vpt; *(dst_2++) = *vpt; + *(dst_1++) = 255; *(dst_2++) = 255; + *(dst_1++) = *upt; *(dst_2++) = *upt; + *(dst_1++) = *(ypt_1++); + *(dst_2++) = *(ypt_2++); + + *(dst_1++) = *vpt; *(dst_2++) = *(vpt++); + *(dst_1++) = 255; *(dst_2++) = 255; + *(dst_1++) = *upt; *(dst_2++) = *(upt++); + *(dst_1++) = *(ypt_1++); + *(dst_2++) = *(ypt_2++); + } + ypt_1 += y_extra; ypt_2 += y_extra; + upt += u_extra; vpt += v_extra; + dst_1 += d_extra; dst_2 += d_extra; + } + } +} + +void pack_yv12interlaced(const unsigned char *source, + const unsigned char *dest, + const int *offsets, + const int *pitches, + const QSize size) +{ + int width = size.width(); + int height = size.height(); + + if (height % 4 || width % 2) + return; + + uint bgra_width = width << 2; + uint dwrap = (bgra_width << 2) - bgra_width; + uint chroma_width = width >> 1; + uint ywrap = (pitches[0] << 1) - width; + uint uwrap = (pitches[1] << 1) - chroma_width; + uint vwrap = (pitches[2] << 1) - chroma_width; + + uint8_t *ypt_1 = (uint8_t *)source + offsets[0]; + uint8_t *ypt_2 = ypt_1 + pitches[0]; + uint8_t *ypt_3 = ypt_1 + (pitches[0] * (height - 2)); + uint8_t 
*ypt_4 = ypt_3 + pitches[0]; + + uint8_t *u1 = (uint8_t *)source + offsets[1]; + uint8_t *v1 = (uint8_t *)source + offsets[2]; + uint8_t *u2 = u1 + pitches[1]; uint8_t *v2 = v1 + pitches[2]; + uint8_t *u3 = u1 + (pitches[1] * ((height - 4) >> 1)); + uint8_t *v3 = v1 + (pitches[2] * ((height - 4) >> 1)); + uint8_t *u4 = u3 + pitches[1]; uint8_t *v4 = v3 + pitches[2]; + + uint8_t *dst_1 = (uint8_t *) dest; + uint8_t *dst_2 = dst_1 + bgra_width; + uint8_t *dst_3 = dst_1 + (bgra_width * (height - 2)); + uint8_t *dst_4 = dst_3 + bgra_width; + +#ifdef MMX + + if (!(width % 8)) + { + // pack first 2 and last 2 rows + for (int col = 0; col < width; col += 8) + { + mmx_pack_chroma(u1, v1); + mmx_pack_easy(dst_1, ypt_1); + mmx_pack_chroma(u2, v2); + mmx_pack_easy(dst_2, ypt_2); + mmx_pack_chroma(u3, v3); + mmx_pack_easy(dst_3, ypt_3); + mmx_pack_chroma(u4, v4); + mmx_pack_easy(dst_4, ypt_4); + + dst_1 += 32; dst_2 += 32; dst_3 += 32; dst_4 += 32; + ypt_1 += 8; ypt_2 += 8; ypt_3 += 8; ypt_4 += 8; + u1 += 4; v1 += 4; u2 += 4; v2 += 4; + u3 += 4; v3 += 4; u4 += 4; v4 += 4; + } + + ypt_1 += ywrap; ypt_2 += ywrap; + dst_1 += bgra_width; dst_2 += bgra_width; + + ypt_3 = ypt_2 + pitches[0]; + ypt_4 = ypt_3 + pitches[0]; + dst_3 = dst_2 + bgra_width; + dst_4 = dst_3 + bgra_width; + + ywrap = (pitches[0] << 2) - width; + + u1 = (uint8_t *)source + offsets[1]; + v1 = (uint8_t *)source + offsets[2]; + u2 = u1 + pitches[1]; v2 = v1 + pitches[2]; + u3 = u2 + pitches[1]; v3 = v2 + pitches[2]; + u4 = u3 + pitches[1]; v4 = v3 + pitches[2]; + + height -= 4; + + // pack main body + for (int row = 0 ; row < height; row += 4) + { + for (int col = 0; col < width; col += 8) + { + mmx_interp_start(u1, u3); mmx_interp_endu(); + mmx_interp_start(v1, v3); mmx_interp_endv(); + mmx_pack_easy(dst_1, ypt_1); + + mmx_interp_start(u2, u4); mmx_interp_endu(); + mmx_interp_start(v2, v4); mmx_interp_endv(); + mmx_pack_easy(dst_2, ypt_2); + + mmx_interp_start(u3, u1); mmx_interp_endu(); + mmx_interp_start(v3, 
v1); mmx_interp_endv(); + mmx_pack_easy(dst_3, ypt_3); + + mmx_interp_start(u4, u2); mmx_interp_endu(); + mmx_interp_start(v4, v2); mmx_interp_endv(); + mmx_pack_easy(dst_4, ypt_4); + + dst_1 += 32; dst_2 += 32; dst_3 += 32; dst_4 += 32; + ypt_1 += 8; ypt_2 += 8; ypt_3 += 8; ypt_4 += 8; + u1 += 4; u2 += 4; u3 += 4; u4 += 4; + v1 += 4; v2 += 4; v3 += 4; v4 += 4; + } + + ypt_1 += ywrap; ypt_2 += ywrap; ypt_3 += ywrap; ypt_4 += ywrap; + dst_1 += dwrap; dst_2 += dwrap; dst_3 += dwrap; dst_4 += dwrap; + u1 += uwrap; v1 += vwrap; u2 += uwrap; v2 += vwrap; + u3 += uwrap; v3 += vwrap; u4 += uwrap;v4 += vwrap; + } + + emms(); + + return; + } +#endif //MMX + + // pack first 2 and last 2 rows + for (int col = 0; col < width; col += 2) + { + *(dst_1++) = *v1; *(dst_2++) = *v2; *(dst_3++) = *v3; *(dst_4++) = *v4; + *(dst_1++) = 255; *(dst_2++) = 255; *(dst_3++) = 255; *(dst_4++) = 255; + *(dst_1++) = *u1; *(dst_2++) = *u2; *(dst_3++) = *u3; *(dst_4++) = *u4; + *(dst_1++) = *(ypt_1++); *(dst_2++) = *(ypt_2++); + *(dst_3++) = *(ypt_3++); *(dst_4++) = *(ypt_4++); + + *(dst_1++) = *(v1++); *(dst_2++) = *(v2++); + *(dst_3++) = *(v3++); *(dst_4++) = *(v4++); + *(dst_1++) = 255; *(dst_2++) = 255; *(dst_3++) = 255; *(dst_4++) = 255; + *(dst_1++) = *(u1++); *(dst_2++) = *(u2++); + *(dst_3++) = *(u3++); *(dst_4++) = *(u4++); + *(dst_1++) = *(ypt_1++); *(dst_2++) = *(ypt_2++); + *(dst_3++) = *(ypt_3++); *(dst_4++) = *(ypt_4++); + } + + ypt_1 += ywrap; ypt_2 += ywrap; + dst_1 += bgra_width; dst_2 += bgra_width; + + ypt_3 = ypt_2 + pitches[0]; + ypt_4 = ypt_3 + pitches[0]; + dst_3 = dst_2 + bgra_width; + dst_4 = dst_3 + bgra_width; + + ywrap = (pitches[0] << 2) - width; + + u1 = (uint8_t *)source + offsets[1]; + v1 = (uint8_t *)source + offsets[2]; + u2 = u1 + pitches[1]; v2 = v1 + pitches[2]; + u3 = u2 + pitches[1]; v3 = v2 + pitches[2]; + u4 = u3 + pitches[1]; v4 = v3 + pitches[2]; + + height -= 4; + + uint8_t v[4], u[4]; + + // pack main body + for (int row = 0; row < height; row += 4) + 
{ + for (int col = 0; col < width; col += 2) + { + c_interp(v, v1, v2, v3, v4); + c_interp(u, u1, u2, u3, u4); + + *(dst_1++) = v[0]; *(dst_2++) = v[1]; + *(dst_3++) = v[2]; *(dst_4++) = v[3]; + *(dst_1++) = 255; *(dst_2++) = 255; *(dst_3++) = 255; *(dst_4++) = 255; + *(dst_1++) = u[0]; *(dst_2++) = u[1]; + *(dst_3++) = u[2]; *(dst_4++) = u[3]; + *(dst_1++) = *(ypt_1++); *(dst_2++) = *(ypt_2++); + *(dst_3++) = *(ypt_3++); *(dst_4++) = *(ypt_4++); + + *(dst_1++) = v[0]; *(dst_2++) = v[1]; + *(dst_3++) = v[2]; *(dst_4++) = v[3]; + *(dst_1++) = 255; *(dst_2++) = 255; *(dst_3++) = 255; *(dst_4++) = 255; + *(dst_1++) = u[0]; *(dst_2++) = u[1]; + *(dst_3++) = u[2]; *(dst_4++) = u[3]; + *(dst_1++) = *(ypt_1++); *(dst_2++) = *(ypt_2++); + *(dst_3++) = *(ypt_3++); *(dst_4++) = *(ypt_4++); + + v1++; v2++; v3++; v4++; + u1++; u2++; u3++; u4++; + } + ypt_1 += ywrap; ypt_2 += ywrap; ypt_3 += ywrap; ypt_4 += ywrap; + u1 += uwrap; u2 += uwrap; u3 += uwrap; u4 += uwrap; + v1 += vwrap; v2 += vwrap; v3 += vwrap; v4 += vwrap; + dst_1 += dwrap; dst_2 += dwrap; dst_3 += dwrap; dst_4 += dwrap; + } +} + +void store_bicubic_weights(float x, float *dst) +{ + float w0 = (((-1 * x + 3) * x - 3) * x + 1) / 6; + float w1 = ((( 3 * x - 6) * x + 0) * x + 4) / 6; + float w2 = (((-3 * x + 3) * x + 3) * x + 1) / 6; + float w3 = ((( 1 * x + 0) * x + 0) * x + 0) / 6; + *dst++ = 1 + x - w1 / (w0 + w1); + *dst++ = 1 - x + w3 / (w2 + w3); + *dst++ = w0 + w1; + *dst++ = 0; +} diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/util-opengl.h mythtv/libs/libmythtv/util-opengl.h --- mythtv.ori/libs/libmythtv/util-opengl.h 2009-01-23 16:38:27.000000000 +1100 +++ mythtv/libs/libmythtv/util-opengl.h 2009-02-10 14:00:22.000000000 +1100 @@ -8,6 +8,7 @@ // MythTV headers #include "mythcontext.h" #include "util-x11.h" +#include "frame.h" // GLX headers #define GLX_GLXEXT_PROTOTYPES @@ -17,13 +18,6 @@ // Qt headers #include -#ifndef APIENTRY -#define APIENTRY -#endif -#ifndef APIENTRYP -#define APIENTRYP APIENTRY * 
-#endif - #ifndef GL_TEXTURE_RECTANGLE_ARB #define GL_TEXTURE_RECTANGLE_ARB 0x84F5 #endif @@ -40,10 +34,6 @@ #define GL_FRAMEBUFFER_INCOMPLETE_DUPLICATE_ATTACHMENT_EXT 0x8CD8 #endif -#ifndef GL_FRAGMENT_PROGRAM_ARB -#define GL_FRAGMENT_PROGRAM_ARB 0x8804 -#endif - // Not all platforms with OpenGL that MythTV supports have the // GL_EXT_framebuffer_object extension so we need to define these.. #ifndef GL_FRAMEBUFFER_EXT @@ -77,6 +67,9 @@ #define GL_FRAMEBUFFER_UNSUPPORTED_EXT 0x8CDD #endif +#ifndef GL_NV_fence +#define GL_ALL_COMPLETED_NV 0x84F2 +#endif #ifndef GLX_VERSION_1_3 typedef XID GLXPbuffer; @@ -113,8 +106,7 @@ Window get_gl_window(Display *XJ_disp, Window XJ_curwin, XVisualInfo *visinfo, - const QSize &window_size, - bool map_window); + const QRect &window_rect); GLXWindow get_glx_window(Display *XJ_disp, GLXFBConfig glx_fbconfig, @@ -123,11 +115,20 @@ GLXPbuffer glx_pbuffer, const QSize &window_size); -void copy_pixels_to_texture(const unsigned char *buf, - int buffer_format, - const QSize &buffer_size, - int texture, - int texture_type); +void pack_yv12alpha(const unsigned char *source, + const unsigned char *dest, + const int *offsets, + const int *pitches, + const QSize size, + const unsigned char *alpha = NULL); + +void pack_yv12interlaced(const unsigned char *source, + const unsigned char *dest, + const int *offsets, + const int *pitches, + const QSize size); + +void store_bicubic_weights(float x, float *dst); __GLXextFuncPtr get_gl_proc_address(const QString &procName); @@ -135,6 +136,8 @@ bool has_gl_fbuffer_object_support(const QString &extensions); bool has_gl_fragment_program_support(const QString &extensions); bool has_glx_video_sync_support(const QString &glx_extensions); +bool has_gl_pixelbuffer_object_support(const QString &extensions); +bool has_gl_nvfence_support(const QString &extensions); extern QString gMythGLExtensions; extern uint gMythGLExtSupported; @@ -146,6 +149,13 @@ extern PFNGLDELETEPROGRAMSARBPROC gMythGLDeleteProgramsARB; 
extern PFNGLGETPROGRAMIVARBPROC gMythGLGetProgramivARB; +extern PFNGLMAPBUFFERPROC gMythGLMapBufferARB; +extern PFNGLBINDBUFFERARBPROC gMythGLBindBufferARB; +extern PFNGLGENBUFFERSARBPROC gMythGLGenBuffersARB; +extern PFNGLBUFFERDATAARBPROC gMythGLBufferDataARB; +extern PFNGLUNMAPBUFFERARBPROC gMythGLUnmapBufferARB; +extern PFNGLDELETEBUFFERSARBPROC gMythGLDeleteBuffersARB; + // Not all platforms with OpenGL that MythTV supports have the // GL_EXT_framebuffer_object extension so we need to define these.. typedef void (APIENTRYP MYTH_GLGENFRAMEBUFFERSEXTPROC) @@ -169,6 +179,10 @@ extern PFNGLXGETVIDEOSYNCSGIPROC gMythGLXGetVideoSyncSGI; extern PFNGLXWAITVIDEOSYNCSGIPROC gMythGLXWaitVideoSyncSGI; +extern PFNGLGENFENCESNVPROC gMythGLGenFencesNV; +extern PFNGLDELETEFENCESNVPROC gMythGLDeleteFencesNV; +extern PFNGLSETFENCENVPROC gMythGLSetFenceNV; +extern PFNGLFINISHFENCENVPROC gMythGLFinishFenceNV; #endif // USING_OPENGL #endif // _UTIL_OPENGL_H_ diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/util-vdpau.cpp mythtv/libs/libmythtv/util-vdpau.cpp --- mythtv.ori/libs/libmythtv/util-vdpau.cpp 1970-01-01 10:00:00.000000000 +1000 +++ mythtv/libs/libmythtv/util-vdpau.cpp 2009-02-11 15:25:16.000000000 +1100 @@ -0,0 +1,2159 @@ +#include +#include +#include + +#include "mythcontext.h" +extern "C" { +#include "frame.h" +#include "avutil.h" +#include "vdpau_render.h" +} + +#include "videoouttypes.h" +#include "mythcodecid.h" +#include "util-x11.h" +#include "util-vdpau.h" + +#define LOC QString("VDPAU: ") +#define LOC_ERR QString("VDPAU Error: ") + +#define MIN_OUTPUT_SURFACES 2 +#define MAX_OUTPUT_SURFACES 4 +#define NUM_REFERENCE_FRAMES 3 + +#define ARSIZE(x) (sizeof(x) / sizeof((x)[0])) + +/* MACRO for error check */ +#define CHECK_ST \ + ok &= (vdp_st == VDP_STATUS_OK); \ + if (!ok) { \ + VERBOSE(VB_PLAYBACK, LOC_ERR + QString("Error at %1:%2 (#%3, %4)") \ + .arg(__FILE__).arg( __LINE__).arg(vdp_st) \ + .arg(vdp_get_error_string(vdp_st))); \ + } + +static const 
VdpChromaType vdp_chroma_type = VDP_CHROMA_TYPE_420; +static const VdpOutputSurfaceRenderBlendState osd_blend = + { + VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION, + VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, + VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, + VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, + VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, + VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, + VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD + }; + +static const VdpOutputSurfaceRenderBlendState pip_blend = + { + VDP_OUTPUT_SURFACE_RENDER_BLEND_STATE_VERSION, + VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, + VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, + VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ONE, + VDP_OUTPUT_SURFACE_RENDER_BLEND_FACTOR_ZERO, + VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD, + VDP_OUTPUT_SURFACE_RENDER_BLEND_EQUATION_ADD + }; + +static void vdpau_preemption_callback(VdpDevice device, void *vdpau_ctx) +{ + (void)device; + VERBOSE(VB_IMPORTANT, LOC_ERR + QString("DISPLAY PRE-EMPTED. 
Aborting playback.")); + VDPAUContext *ctx = (VDPAUContext*)vdpau_ctx; + // TODO this should really kick off re-initialisation + if (ctx) + ctx->SetErrored(); +} + +VDPAUContext::VDPAUContext() + : nextframedelay(0), lastframetime(0), + pix_fmt(-1), maxVideoWidth(0), maxVideoHeight(0), + videoSurfaces(0), surface_render(0), checkVideoSurfaces(8), + numSurfaces(0), + videoSurface(0), outputSurface(0), checkOutputSurfaces(false), + outputSize(QSize(0,0)), decoder(0), maxReferences(2), + videoMixer(0), surfaceNum(0), osdVideoSurface(0), + osdOutputSurface(0), osdVideoMixer(0), osdAlpha(0), + osdReady(false), osdSize(QSize(0,0)) ,deintAvail(false), + deinterlacer("notset"), deinterlacing(false), currentFrameNum(-1), + needDeintRefs(false), useColorControl(false), + pipFrameSize(QSize(0,0)), pipVideoSurface(0), + pipOutputSurface(0), pipAlpha(0), + pipVideoMixer(0), pipReady(0), + vdp_flip_target(NULL), vdp_flip_queue(NULL), + vdpauDecode(false), vdp_device(NULL), errored(false), + vdp_get_proc_address(NULL), vdp_device_destroy(NULL), + vdp_get_error_string(NULL), vdp_get_api_version(NULL), + vdp_get_information_string(NULL), vdp_video_surface_create(NULL), + vdp_video_surface_destroy(NULL), vdp_video_surface_put_bits_y_cb_cr(NULL), + vdp_video_surface_get_bits_y_cb_cr(NULL), + vdp_video_surface_query_get_put_bits_y_cb_cr_capabilities(NULL), + vdp_video_surface_query_capabilities(NULL), + vdp_output_surface_put_bits_y_cb_cr(NULL), + vdp_output_surface_put_bits_native(NULL), vdp_output_surface_create(NULL), + vdp_output_surface_destroy(NULL), + vdp_output_surface_render_bitmap_surface(NULL), + vdp_output_surface_query_capabilities(NULL), vdp_video_mixer_create(NULL), + vdp_video_mixer_set_feature_enables(NULL), vdp_video_mixer_destroy(NULL), + vdp_video_mixer_render(NULL), vdp_video_mixer_set_attribute_values(NULL), + vdp_video_mixer_query_feature_support(NULL), + vdp_video_mixer_query_attribute_support(NULL), + vdp_video_mixer_query_parameter_support(NULL), + 
vdp_generate_csc_matrix(NULL), + vdp_presentation_queue_target_destroy(NULL), + vdp_presentation_queue_create(NULL), + vdp_presentation_queue_destroy(NULL), vdp_presentation_queue_display(NULL), + vdp_presentation_queue_block_until_surface_idle(NULL), + vdp_presentation_queue_target_create_x11(NULL), + vdp_presentation_queue_query_surface_status(NULL), + vdp_presentation_queue_get_time(NULL), + vdp_presentation_queue_set_background_color(NULL), + vdp_decoder_create(NULL), vdp_decoder_destroy(NULL), + vdp_decoder_render(NULL), vdp_bitmap_surface_create(NULL), + vdp_bitmap_surface_destroy(NULL), vdp_bitmap_surface_put_bits_native(NULL), + vdp_bitmap_surface_query_capabilities(NULL), + vdp_preemption_callback_register(NULL) +{ +} + +VDPAUContext::~VDPAUContext() +{ +} + +bool VDPAUContext::Init(Display *disp, int screen, + Window win, QSize screen_size, + bool color_control, MythCodecID mcodecid) +{ + outputSize = screen_size; + + if ((kCodec_VDPAU_BEGIN < mcodecid) && (mcodecid < kCodec_VDPAU_END)) + vdpauDecode = true; + + bool ok; + + ok = InitProcs(disp, screen); + if (!ok) + return ok; + + ok = InitFlipQueue(win); + if (!ok) + return ok; + + ok = InitOutput(screen_size); + if (!ok) + return ok; + + if (color_control) + useColorControl = InitColorControl(); + + return ok; +} + +void VDPAUContext::Deinit(void) +{ + if (decoder) + { + vdp_decoder_destroy(decoder); + decoder = NULL; + pix_fmt = -1; + } + ClearReferenceFrames(); + DeinitOSD(); + FreeOutput(); + DeinitFlipQueue(); + DeinitPip(); + DeinitProcs(); + outputSize = QSize(0,0); +} + +static const char* dummy_get_error_string(VdpStatus status) +{ + static const char dummy[] = "Unknown"; + return &dummy[0]; +} + +bool VDPAUContext::InitProcs(Display *disp, int screen) +{ + VdpStatus vdp_st; + bool ok = true; + vdp_get_error_string = &dummy_get_error_string; + + vdp_st = vdp_device_create_x11( + disp, + screen, + &vdp_device, + &vdp_get_proc_address + ); + CHECK_ST + if (!ok) + { + VERBOSE(VB_PLAYBACK, LOC_ERR 
+ + QString("Failed to create VDP Device.")); + return false; + } + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_GET_ERROR_STRING, + (void **)&vdp_get_error_string + ); + ok &= (vdp_st == VDP_STATUS_OK); + if (!ok) + vdp_get_error_string = &dummy_get_error_string; + + // non-fatal debugging info + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_GET_API_VERSION, + (void **)&vdp_get_api_version + ); + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_GET_INFORMATION_STRING, + (void **)&vdp_get_information_string + ); + + static bool debugged = false; + + if (!debugged) + { + debugged = true; + if (vdp_get_api_version) + { + uint version; + vdp_get_api_version(&version); + VERBOSE(VB_PLAYBACK, LOC + QString("Version %1").arg(version)); + } + if (vdp_get_information_string) + { + const char * info; + vdp_get_information_string(&info); + VERBOSE(VB_PLAYBACK, LOC + QString("Information %2").arg(info)); + } + } + + // non-fatal callback registration + vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_PREEMPTION_CALLBACK_REGISTER, + (void **)&vdp_preemption_callback_register + ); + + if (vdp_preemption_callback_register) + { + vdp_preemption_callback_register( + vdp_device, + &vdpau_preemption_callback, + (void*)this + ); + } + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_DEVICE_DESTROY, + (void **)&vdp_device_destroy + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_VIDEO_SURFACE_CREATE, + (void **)&vdp_video_surface_create + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_VIDEO_SURFACE_DESTROY, + (void **)&vdp_video_surface_destroy + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_VIDEO_SURFACE_PUT_BITS_Y_CB_CR, + (void **)&vdp_video_surface_put_bits_y_cb_cr + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR, + (void **)&vdp_video_surface_get_bits_y_cb_cr + ); + CHECK_ST + + 
vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_VIDEO_SURFACE_QUERY_CAPABILITIES, + (void **)&vdp_video_surface_query_capabilities + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_Y_CB_CR, + (void **)&vdp_output_surface_put_bits_y_cb_cr + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_OUTPUT_SURFACE_PUT_BITS_NATIVE, + (void **)&vdp_output_surface_put_bits_native + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_OUTPUT_SURFACE_CREATE, + (void **)&vdp_output_surface_create + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_OUTPUT_SURFACE_DESTROY, + (void **)&vdp_output_surface_destroy + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_OUTPUT_SURFACE_RENDER_BITMAP_SURFACE, + (void **)&vdp_output_surface_render_bitmap_surface + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_OUTPUT_SURFACE_QUERY_CAPABILITIES, + (void **)&vdp_output_surface_query_capabilities + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_VIDEO_MIXER_CREATE, + (void **)&vdp_video_mixer_create + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_VIDEO_MIXER_SET_FEATURE_ENABLES, + (void **)&vdp_video_mixer_set_feature_enables + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_VIDEO_MIXER_DESTROY, + (void **)&vdp_video_mixer_destroy + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_VIDEO_MIXER_RENDER, + (void **)&vdp_video_mixer_render + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_VIDEO_MIXER_SET_ATTRIBUTE_VALUES, + (void **)&vdp_video_mixer_set_attribute_values + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_VIDEO_MIXER_QUERY_FEATURE_SUPPORT, + (void **)&vdp_video_mixer_query_feature_support + ); + CHECK_ST + + vdp_st =
vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_VIDEO_MIXER_QUERY_PARAMETER_SUPPORT, + (void **)&vdp_video_mixer_query_parameter_support + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_VIDEO_MIXER_QUERY_ATTRIBUTE_SUPPORT, + (void **)&vdp_video_mixer_query_attribute_support + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_GENERATE_CSC_MATRIX, + (void **)&vdp_generate_csc_matrix + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_DESTROY, + (void **)&vdp_presentation_queue_target_destroy + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_PRESENTATION_QUEUE_CREATE, + (void **)&vdp_presentation_queue_create + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_PRESENTATION_QUEUE_DESTROY, + (void **)&vdp_presentation_queue_destroy + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_PRESENTATION_QUEUE_DISPLAY, + (void **)&vdp_presentation_queue_display + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_PRESENTATION_QUEUE_GET_TIME, + (void **)&vdp_presentation_queue_get_time + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_PRESENTATION_QUEUE_BLOCK_UNTIL_SURFACE_IDLE, + (void **)&vdp_presentation_queue_block_until_surface_idle + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_CREATE_X11, + (void **)&vdp_presentation_queue_target_create_x11 + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_PRESENTATION_QUEUE_SET_BACKGROUND_COLOR, + (void **)&vdp_presentation_queue_set_background_color + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_DECODER_CREATE, + (void **)&vdp_decoder_create + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_DECODER_DESTROY, + (void **)&vdp_decoder_destroy + ); + 
CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_DECODER_RENDER, + (void **)&vdp_decoder_render + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_PRESENTATION_QUEUE_QUERY_SURFACE_STATUS, + (void **)&vdp_presentation_queue_query_surface_status + ); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_VIDEO_SURFACE_QUERY_GET_PUT_BITS_Y_CB_CR_CAPABILITIES, + (void **)&vdp_video_surface_query_get_put_bits_y_cb_cr_capabilities); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_BITMAP_SURFACE_CREATE, + (void **)&vdp_bitmap_surface_create); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_BITMAP_SURFACE_PUT_BITS_NATIVE, + (void **)&vdp_bitmap_surface_put_bits_native); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_BITMAP_SURFACE_QUERY_CAPABILITIES, + (void **)&vdp_bitmap_surface_query_capabilities); + CHECK_ST + + vdp_st = vdp_get_proc_address( + vdp_device, + VDP_FUNC_ID_BITMAP_SURFACE_DESTROY, + (void **)&vdp_bitmap_surface_destroy); + CHECK_ST + + return ok; +} + +void VDPAUContext::DeinitProcs(void) +{ + if (vdp_device && vdp_device_destroy) + { + vdp_device_destroy(vdp_device); + vdp_device = 0; + } +} + +bool VDPAUContext::InitFlipQueue(Window win) +{ + VdpStatus vdp_st; + bool ok = true; + + vdp_st = vdp_presentation_queue_target_create_x11( + vdp_device, + win, + &vdp_flip_target + ); + CHECK_ST + + vdp_st = vdp_presentation_queue_create( + vdp_device, + vdp_flip_target, + &vdp_flip_queue + ); + CHECK_ST + + float tmp = 2.0 / 255.0; + VdpColor background; + background.red = tmp; + background.green = tmp; + background.blue = tmp; + background.alpha = 1.0f; + + if (ok) + { + vdp_st = vdp_presentation_queue_set_background_color( + vdp_flip_queue, + &background + ); + CHECK_ST + } + + return ok; +} + +void VDPAUContext::DeinitFlipQueue(void) +{ + VdpStatus vdp_st; + bool ok = true; + + if (vdp_flip_queue) + { + vdp_st = 
vdp_presentation_queue_destroy( + vdp_flip_queue); + vdp_flip_queue = 0; + CHECK_ST + } + + if (vdp_flip_target) + { + vdp_st = vdp_presentation_queue_target_destroy( + vdp_flip_target); + vdp_flip_target = 0; + CHECK_ST + } +} + +bool VDPAUContext::InitBuffers(int width, int height, int numbufs, + LetterBoxColour letterbox_colour) +{ + int num_bufs = numbufs; + + // for software decode, create enough surfaces for deinterlacing + // TODO only create when actually deinterlacing + if (!vdpauDecode) + num_bufs = NUM_REFERENCE_FRAMES; + + VdpStatus vdp_st; + bool ok = true; + + int i; + + VdpBool supported; + vdp_st = vdp_video_surface_query_capabilities( + vdp_device, + vdp_chroma_type, + &supported, + &maxVideoWidth, + &maxVideoHeight + ); + CHECK_ST + + if (!supported || !ok) + { + VERBOSE(VB_PLAYBACK, LOC_ERR + + QString("Video surface -chroma type not supported.")); + return false; + } + else if (maxVideoWidth < (uint)width || + maxVideoHeight < (uint)height) + { + VERBOSE(VB_PLAYBACK, LOC_ERR + + QString("Video surface - too large (%1x%2 > %3x%4).") + .arg(width).arg(height) + .arg(maxVideoWidth).arg(maxVideoHeight)); + return false; + } + + videoSurfaces = (VdpVideoSurface *)malloc(sizeof(VdpVideoSurface) * num_bufs); + if (vdpauDecode) + { + surface_render = (vdpau_render_state_t*)malloc(sizeof(vdpau_render_state_t) * num_bufs); + memset(surface_render, 0, sizeof(vdpau_render_state_t) * num_bufs); + } + + numSurfaces = num_bufs; + + for (i = 0; i < num_bufs; i++) + { + vdp_st = vdp_video_surface_create( + vdp_device, + vdp_chroma_type, + width, + height, + &(videoSurfaces[i]) + ); + CHECK_ST + + if (!ok) + { + VERBOSE(VB_PLAYBACK, LOC_ERR + + QString("Failed to create video surface.")); + return false; + } + + if (vdpauDecode) + { + surface_render[i].magic = MP_VDPAU_RENDER_MAGIC; + surface_render[i].state = 0; + surface_render[i].surface = videoSurfaces[i]; + } + } + + // clear video surfaces to black + vdp_st = 
vdp_video_surface_query_get_put_bits_y_cb_cr_capabilities( + vdp_device, + vdp_chroma_type, + VDP_YCBCR_FORMAT_YV12, + &supported); + + if (supported && (vdp_st == VDP_STATUS_OK)) + { + unsigned char *tmp = + new unsigned char[(width * height * 3)>>1]; + if (tmp) + { + bzero(tmp, width * height); + memset(tmp + (width * height), 127, (width * height)>>1); + uint32_t pitches[3] = {width, width, width>>1}; + void* const planes[3] = + {tmp, tmp + (width * height), tmp + (width * height)}; + for (i = 0; i < num_bufs; i++) + { + vdp_video_surface_put_bits_y_cb_cr( + videoSurfaces[i], + VDP_YCBCR_FORMAT_YV12, + planes, + pitches + ); + } + delete [] tmp; + } + + } + + // TODO video capability/parameter check + // but should just fail gracefully anyway + + uint32_t num_layers = 2; // PiP and OSD + VdpVideoMixerParameter parameters[] = { + VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_WIDTH, + VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_HEIGHT, + VDP_VIDEO_MIXER_PARAMETER_CHROMA_TYPE, + VDP_VIDEO_MIXER_PARAMETER_LAYERS, + }; + + void const * parameter_values[] = { + &width, + &height, + &vdp_chroma_type, + &num_layers + }; + + // check deinterlacers available + vdp_st = vdp_video_mixer_query_feature_support( + vdp_device, + VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL, + &supported + ); + CHECK_ST + deintAvail = (ok && supported); + vdp_st = vdp_video_mixer_query_feature_support( + vdp_device, + VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL_SPATIAL, + &supported + ); + CHECK_ST + deintAvail &= (ok && supported); + + VdpVideoMixerFeature features[] = { + VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL, + VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL_SPATIAL, + }; + + vdp_st = vdp_video_mixer_create( + vdp_device, + deintAvail ? ARSIZE(features) : 0, + deintAvail ?
features : NULL, + ARSIZE(parameters), + parameters, + parameter_values, + &videoMixer + ); + CHECK_ST + + if (!ok && videoMixer) + { + VERBOSE(VB_IMPORTANT, LOC_ERR + + QString("Create video mixer - errored but returned handle.")); + } + + // minimise green screen + if (ok) + ClearScreen(); + + // set letterbox colour + if (ok && (letterbox_colour == kLetterBoxColour_Gray25)) + { + VdpColor gray; + gray.red = 0.5f; + gray.green = 0.5f; + gray.blue = 0.5f; + gray.alpha = 1.0f; + + VdpVideoMixerAttribute attributes[] = { + VDP_VIDEO_MIXER_ATTRIBUTE_BACKGROUND_COLOR, + }; + void const * attribute_values[] = { &gray }; + + vdp_st = vdp_video_mixer_set_attribute_values( + videoMixer, + ARSIZE(attributes), + attributes, + attribute_values + ); + CHECK_ST + } + + return ok; +} + +void VDPAUContext::FreeBuffers(void) +{ + VdpStatus vdp_st; + bool ok = true; + + int i; + + if (videoMixer) + { + vdp_st = vdp_video_mixer_destroy( + videoMixer + ); + videoMixer = 0; + CHECK_ST + } + + if (videoSurfaces) + { + for (i = 0; i < numSurfaces; i++) + { + if (videoSurfaces[i]) + { + vdp_st = vdp_video_surface_destroy( + videoSurfaces[i]); + CHECK_ST + } + } + free(videoSurfaces); + videoSurfaces = NULL; + } + + if (surface_render) + free(surface_render); + surface_render = NULL; +} + +bool VDPAUContext::InitOutput(QSize size) +{ + VdpStatus vdp_st; + bool ok = true; + int i; + + VdpBool supported; + uint max_width, max_height; + vdp_st = vdp_output_surface_query_capabilities( + vdp_device, + VDP_RGBA_FORMAT_B8G8R8A8, + &supported, + &max_width, + &max_height + ); + CHECK_ST + + if (!supported || !ok) + { + VERBOSE(VB_PLAYBACK, LOC_ERR + + QString("Output surface chroma format not supported.")); + return false; + } + else if (max_width < (uint)size.width() || + max_height < (uint)size.height()) + { + VERBOSE(VB_PLAYBACK, LOC_ERR + + QString("Output surface - too large (%1x%2 > %3x%4).") + .arg(size.width()).arg(size.height()) + .arg(max_width).arg(max_height)); + return false; + } + 
+ for (i = 0; i < MIN_OUTPUT_SURFACES; i++) + { + VdpOutputSurface tmp; + vdp_st = vdp_output_surface_create( + vdp_device, + VDP_RGBA_FORMAT_B8G8R8A8, + size.width(), + size.height(), + &tmp + ); + CHECK_ST + + if (!ok) + { + VERBOSE(VB_PLAYBACK, LOC_ERR + + QString("Failed to create output surface.")); + return false; + } + outputSurfaces.push_back(tmp); + } + + outRect.x0 = 0; + outRect.y0 = 0; + outRect.x1 = size.width(); + outRect.y1 = size.height(); + surfaceNum = 0; + return ok; +} + +void VDPAUContext::FreeOutput(void) +{ + if (!vdp_output_surface_destroy) + return; + + VdpStatus vdp_st; + bool ok = true; + uint i; + + for (i = 0; i < outputSurfaces.size(); i++) + { + if (outputSurfaces[i]) + { + vdp_st = vdp_output_surface_destroy( + outputSurfaces[i]); + CHECK_ST + } + } + outputSurfaces.clear(); + checkOutputSurfaces = false; +} + +void VDPAUContext::Decode(VideoFrame *frame) +{ + if (!vdpauDecode) + { + VERBOSE(VB_IMPORTANT, LOC_ERR + + QString("VDPAUContext::Decode called for cpu decode.")); + return; + } + + VdpStatus vdp_st; + bool ok = true; + vdpau_render_state_t *render = (vdpau_render_state_t *)frame->buf; + + if (frame->pix_fmt != pix_fmt) + { + if (frame->pix_fmt == PIX_FMT_VDPAU_H264_MAIN || + frame->pix_fmt == PIX_FMT_VDPAU_H264_HIGH) + { + if (render) + maxReferences = render->info.h264.num_ref_frames; + + if (maxReferences < 1 || maxReferences > 16) + { + uint32_t round_width = (frame->width + 15) & ~15; + uint32_t round_height = (frame->height + 15) & ~15; + uint32_t surf_size = (round_width * round_height * 3) / 2; + maxReferences = (12 * 1024 * 1024) / surf_size; + } + if (maxReferences > 16) + maxReferences = 16; + } + + VdpDecoderProfile vdp_decoder_profile; + switch (frame->pix_fmt) + { + case PIX_FMT_VDPAU_MPEG1: vdp_decoder_profile = VDP_DECODER_PROFILE_MPEG1; break; + case PIX_FMT_VDPAU_MPEG2_SIMPLE: vdp_decoder_profile = VDP_DECODER_PROFILE_MPEG2_SIMPLE; break; + case PIX_FMT_VDPAU_MPEG2_MAIN: vdp_decoder_profile = 
VDP_DECODER_PROFILE_MPEG2_MAIN; break; + case PIX_FMT_VDPAU_H264_BASELINE: vdp_decoder_profile = VDP_DECODER_PROFILE_H264_BASELINE; break; + case PIX_FMT_VDPAU_H264_MAIN: vdp_decoder_profile = VDP_DECODER_PROFILE_H264_MAIN; break; + case PIX_FMT_VDPAU_H264_HIGH: vdp_decoder_profile = VDP_DECODER_PROFILE_H264_HIGH; break; + case PIX_FMT_VDPAU_VC1_SIMPLE: vdp_decoder_profile = VDP_DECODER_PROFILE_VC1_SIMPLE; break; + case PIX_FMT_VDPAU_VC1_MAIN: vdp_decoder_profile = VDP_DECODER_PROFILE_VC1_MAIN; break; + case PIX_FMT_VDPAU_VC1_ADVANCED: vdp_decoder_profile = VDP_DECODER_PROFILE_VC1_ADVANCED; break; + default: + assert(0); + return; + } + + // generic capability pre-checked but specific profile may now fail + vdp_st = vdp_decoder_create( + vdp_device, + vdp_decoder_profile, + frame->width, + frame->height, + maxReferences, + &decoder + ); + CHECK_ST + + if (ok) + { + pix_fmt = frame->pix_fmt; + VERBOSE(VB_PLAYBACK, LOC + + QString("Created VDPAU decoder (%1 ref frames)") + .arg(maxReferences)); + } + else + { + VERBOSE(VB_PLAYBACK, LOC_ERR + QString("Failed to create decoder.")); + errored = true; + } + } + + render = (vdpau_render_state_t *)frame->buf; + if (!render || !decoder) + return; + + vdp_st = vdp_decoder_render( + decoder, + render->surface, + (VdpPictureInfo const *)&(render->info), + render->bitstreamBuffersUsed, + render->bitstreamBuffers + ); + CHECK_ST +} + +void VDPAUContext::PrepareVideo(VideoFrame *frame, QRect video_rect, + QRect display_video_rect, + QSize screen_size, FrameScanType scan, + bool pause_frame) +{ + if (checkVideoSurfaces == 1) + checkOutputSurfaces = true; + + if (checkVideoSurfaces > 0) + checkVideoSurfaces--; + + VdpStatus vdp_st; + bool ok = true; + VdpTime dummy; + vdpau_render_state_t *render; + + bool new_frame = true; + bool deint = (deinterlacing && needDeintRefs && !pause_frame); + if (deint && frame) + { + new_frame = UpdateReferenceFrames(frame); + if (vdpauDecode && (referenceFrames.size() != NUM_REFERENCE_FRAMES)) + 
deint = false; + } + + if (vdpauDecode && frame) + { + render = (vdpau_render_state_t *)frame->buf; + if (!render) + return; + + videoSurface = render->surface; + } + else if (new_frame && frame) + { + int surf = 0; + if (deint) + surf = (currentFrameNum + 1) % NUM_REFERENCE_FRAMES; + + videoSurface = videoSurfaces[surf]; + + uint32_t pitches[3] = { + frame->pitches[0], + frame->pitches[2], + frame->pitches[1] + }; + void* const planes[3] = { + frame->buf, + frame->buf + frame->offsets[2], + frame->buf + frame->offsets[1] + }; + vdp_st = vdp_video_surface_put_bits_y_cb_cr( + videoSurface, + VDP_YCBCR_FORMAT_YV12, + planes, + pitches); + CHECK_ST; + if (!ok) + return; + } + else if (!frame) + { + deint = false; + if (!videoSurface) + videoSurface = videoSurfaces[0]; + } + + if (outRect.x1 != (uint)screen_size.width() || + outRect.y1 != (uint)screen_size.height()) + { + FreeOutput(); + InitOutput(screen_size); + } + + // fix broken/missing negative rect clipping in vdpau + if (display_video_rect.top() < 0 && display_video_rect.height() > 0) + { + float yscale = (float)video_rect.height() / + (float)display_video_rect.height(); + int tmp = video_rect.top() - + (int)((float)display_video_rect.top() * yscale); + video_rect.setTop(max(0, tmp)); + display_video_rect.setTop(0); + } + + if (display_video_rect.left() < 0 && display_video_rect.width() > 0) + { + float xscale = (float)video_rect.width() / + (float)display_video_rect.width(); + int tmp = video_rect.left() - + (int)((float)display_video_rect.left() * xscale); + video_rect.setLeft(max(0, tmp)); + display_video_rect.setLeft(0); + } + + outRect.x0 = 0; + outRect.y0 = 0; + outRect.x1 = screen_size.width(); + outRect.y1 = screen_size.height(); + + VdpRect srcRect; + srcRect.x0 = video_rect.left(); + srcRect.y0 = video_rect.top(); + srcRect.x1 = video_rect.left() + video_rect.width(); + srcRect.y1 = video_rect.top() + video_rect.height(); + + outRectVid.x0 = display_video_rect.left(); + outRectVid.y0 = 
display_video_rect.top(); + outRectVid.x1 = display_video_rect.left() + display_video_rect.width(); + outRectVid.y1 = display_video_rect.top() + display_video_rect.height(); + + VdpVideoMixerPictureStructure field = + VDP_VIDEO_MIXER_PICTURE_STRUCTURE_FRAME; + + if (scan == kScan_Interlaced && deinterlacing) + field = VDP_VIDEO_MIXER_PICTURE_STRUCTURE_TOP_FIELD; + else if (scan == kScan_Intr2ndField && deinterlacing) + field = VDP_VIDEO_MIXER_PICTURE_STRUCTURE_BOTTOM_FIELD; + + outputSurface = outputSurfaces[surfaceNum]; + usleep(2000); + vdp_st = vdp_presentation_queue_block_until_surface_idle( + vdp_flip_queue, + outputSurface, + &dummy + ); + CHECK_ST + + VdpVideoSurface past_surfaces[2] = { VDP_INVALID_HANDLE, + VDP_INVALID_HANDLE }; + VdpVideoSurface future_surfaces[1] = { VDP_INVALID_HANDLE }; + + if (deint) + { + VdpVideoSurface refs[NUM_REFERENCE_FRAMES]; + for (int i = 0; i < NUM_REFERENCE_FRAMES; i++) + { + if (vdpauDecode) + { + vdpau_render_state_t *render; + render = (vdpau_render_state_t *)referenceFrames[i]->buf; + refs[i] = render ? 
render->surface : VDP_INVALID_HANDLE; + } + else + { + int ref = (currentFrameNum + i - 1) % NUM_REFERENCE_FRAMES; + if (ref < 0) + ref = 0; + refs[i] = videoSurfaces[ref]; + } + } + + videoSurface = refs[1]; + + if (scan == kScan_Interlaced) + { + // next field is in the current frame + future_surfaces[0] = refs[1]; + // previous two fields are in the previous frame + past_surfaces[0] = refs[0]; + past_surfaces[1] = refs[0]; + } + else + { + // next field is in the next frame + future_surfaces[0] = refs[2]; + // previous field is in the current frame + past_surfaces[0] = refs[1]; + // field before that is in the previous frame + past_surfaces[1] = refs[0]; + } + } + + uint num_layers = 0; + + if (osdReady) { num_layers++; } + if (pipReady) { num_layers++; } + + VdpLayer layers[2]; + + if (num_layers == 1) + { + if (osdReady) + memcpy(&(layers[0]), &osdLayer, sizeof(osdLayer)); + if (pipReady) + memcpy(&(layers[0]), &pipLayer, sizeof(pipLayer)); + } + else if (num_layers == 2) + { + memcpy(&(layers[0]), &pipLayer, sizeof(pipLayer)); + memcpy(&(layers[1]), &osdLayer, sizeof(osdLayer)); + } + + vdp_st = vdp_video_mixer_render( + videoMixer, + VDP_INVALID_HANDLE, + NULL, + field, + deint ? ARSIZE(past_surfaces) : 0, + deint ? past_surfaces : NULL, + videoSurface, + deint ? ARSIZE(future_surfaces) : 0, + deint ? future_surfaces : NULL, + &srcRect, + outputSurface, + &outRect, + &outRectVid, + num_layers, + num_layers ? 
layers : NULL + ); + CHECK_ST + + if (pipReady) + pipReady--; +} + +void VDPAUContext::DisplayNextFrame(void) +{ + if (!outputSurface) + return; + + VdpStatus vdp_st; + bool ok = true; + VdpTime now = 0; + + if (nextframedelay > 0) + { + vdp_st = vdp_presentation_queue_get_time( + vdp_flip_queue, + &now + ); + CHECK_ST + + if (lastframetime == 0) + lastframetime = now; + + now += nextframedelay * 1000; + } + + vdp_st = vdp_presentation_queue_display( + vdp_flip_queue, + outputSurface, + outRect.x1, + outRect.y1, + now + ); + CHECK_ST + + surfaceNum++; + if (surfaceNum >= (int)(outputSurfaces.size())) + surfaceNum = 0;; + + if (checkOutputSurfaces) + AddOutputSurfaces(); +} + +void VDPAUContext::AddOutputSurfaces(void) +{ + checkOutputSurfaces = false; + VdpStatus vdp_st; + bool ok = true; + + int cnt = 0; + int extra = MAX_OUTPUT_SURFACES - outputSurfaces.size(); + if (extra <= 0) + return; + + for (int i = 0; i < extra; i++) + { + VdpOutputSurface tmp; + vdp_st = vdp_output_surface_create( + vdp_device, + VDP_RGBA_FORMAT_B8G8R8A8, + outputSize.width(), + outputSize.height(), + &tmp + ); + // suppress non-fatal error messages + ok &= (vdp_st == VDP_STATUS_OK); + + if (!ok) + break; + + outputSurfaces.push_back(tmp); + cnt++; + } + VERBOSE(VB_PLAYBACK, LOC + QString("Using %1 output surfaces (max %2)") + .arg(outputSurfaces.size()).arg(MAX_OUTPUT_SURFACES)); +} + +void VDPAUContext::SetNextFrameDisplayTimeOffset(int delayus) +{ + nextframedelay = delayus; +} + +bool VDPAUContext::InitOSD(QSize size) +{ + if (!vdp_device) + return false; + + VdpStatus vdp_st; + bool ok = true; + + uint width = size.width(); + uint height = size.height(); + VdpBool supported = false; + + vdp_st = vdp_video_surface_query_get_put_bits_y_cb_cr_capabilities( + vdp_device, + vdp_chroma_type, + VDP_YCBCR_FORMAT_YV12, + &supported + ); + CHECK_ST + if (!supported || !ok) + { + VERBOSE(VB_PLAYBACK, LOC_ERR + + QString("YV12 upload to video surface not supported.")); + return false; + } + + 
uint32_t max_width, max_height; + vdp_st = vdp_bitmap_surface_query_capabilities( + vdp_device, + VDP_RGBA_FORMAT_A8, + &supported, + &max_width, + &max_height + ); + CHECK_ST + if (!supported || !ok) + { + VERBOSE(VB_PLAYBACK, LOC_ERR + + QString("Alpha transparency bitmaps not supported.")); + return false; + } + else if (max_width < width || + max_height < height) + { + VERBOSE(VB_PLAYBACK, LOC_ERR + + QString("Alpha bitmap too large (%1x%2 > %3x%4).") + .arg(width).arg(height).arg(max_width).arg(max_height)); + return false; + } + + if (maxVideoWidth < width || + maxVideoHeight < height) + { + VERBOSE(VB_PLAYBACK, LOC_ERR + + QString("OSD size too large for video surface.")); + return false; + } + + // capability already checked in InitOutput + vdp_st = vdp_output_surface_create( + vdp_device, + VDP_RGBA_FORMAT_B8G8R8A8, + width, + height, + &osdOutputSurface + ); + CHECK_ST; + + if (!ok) + { + VERBOSE(VB_PLAYBACK, LOC_ERR + + QString("Failed to create output surface.")); + } + else + { + vdp_st = vdp_video_surface_create( + vdp_device, + vdp_chroma_type, + width, + height, + &osdVideoSurface + ); + CHECK_ST + } + + if (!ok) + { + VERBOSE(VB_PLAYBACK, LOC_ERR + + QString("Failed to create video surface.")); + } + else + { + vdp_st = vdp_bitmap_surface_create( + vdp_device, + VDP_RGBA_FORMAT_A8, + width, + height, + false, + &osdAlpha + ); + CHECK_ST + } + + VdpVideoMixerParameter parameters[] = { + VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_WIDTH, + VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_HEIGHT, + VDP_VIDEO_MIXER_PARAMETER_CHROMA_TYPE + }; + + void const * parameter_values[] = { + &width, + &height, + &vdp_chroma_type + }; + + if (!ok) + { + VERBOSE(VB_PLAYBACK, LOC_ERR + + QString("Failed to create bitmap surface.")); + } + else + { + vdp_st = vdp_video_mixer_create( + vdp_device, + 0, + 0, + ARSIZE(parameters), + parameters, + parameter_values, + &osdVideoMixer + ); + CHECK_ST + } + + if (!ok) + { + VERBOSE(VB_PLAYBACK, LOC_ERR + + QString("Failed to create 
video mixer.")); + } + else + { + osdSize = size; + osdRect.x0 = 0; + osdRect.y0 = 0; + osdRect.x1 = width; + osdRect.y1 = height; + osdLayer.struct_version = VDP_LAYER_VERSION; + osdLayer.source_surface = osdOutputSurface; + osdLayer.source_rect = &osdRect; + osdLayer.destination_rect = &osdRect; + VERBOSE(VB_PLAYBACK, LOC + QString("Created OSD (%1x%2)") + .arg(width).arg(height)); + return ok; + } + + osdSize = QSize(0,0); + return ok; +} + +void VDPAUContext::UpdateOSD(void* const planes[3], + QSize size, + void* const alpha[1]) +{ + if (size != osdSize) + { + DeinitOSD(); + if (!InitOSD(size)) + return; + } + + VdpStatus vdp_st; + bool ok = true; + + // upload OSD YV12 data + uint32_t pitches[3] = {osdSize.width(), + osdSize.width()>>1, + osdSize.width()>>1}; + void * const realplanes[3] = { planes[0], planes[2], planes[1] }; + + vdp_st = vdp_video_surface_put_bits_y_cb_cr(osdVideoSurface, + VDP_YCBCR_FORMAT_YV12, + realplanes, + pitches); + CHECK_ST; + + // osd YV12 colourspace conversion + if (ok) + { + vdp_st = vdp_video_mixer_render( + osdVideoMixer, + VDP_INVALID_HANDLE, + NULL, + VDP_VIDEO_MIXER_PICTURE_STRUCTURE_FRAME, + 0, + NULL, + osdVideoSurface, + 0, + NULL, + NULL, + osdOutputSurface, + &osdRect, + &osdRect, + 0, + NULL + ); + CHECK_ST + } + + // upload OSD alpha data + if (ok) + { + uint32_t pitch[1] = {osdSize.width()}; + vdp_st = vdp_bitmap_surface_put_bits_native( + osdAlpha, + alpha, + pitch, + NULL + ); + CHECK_ST + } + + // blend alpha into osd + if (ok) + { + vdp_st = vdp_output_surface_render_bitmap_surface( + osdOutputSurface, + NULL, + osdAlpha, + NULL, + NULL, + &osd_blend, + 0 + ); + CHECK_ST + } + + osdReady = ok; +} + +void VDPAUContext::DeinitOSD(void) +{ + if (osdOutputSurface) + { + vdp_output_surface_destroy(osdOutputSurface); + osdOutputSurface = 0; + } + + if (osdVideoSurface) + { + vdp_video_surface_destroy(osdVideoSurface); + osdVideoSurface = 0; + } + + if (osdVideoMixer) + { + vdp_video_mixer_destroy(osdVideoMixer); + 
osdVideoMixer = 0; + } + + if (osdAlpha) + { + vdp_bitmap_surface_destroy(osdAlpha); + osdAlpha = 0; + } + osdSize = QSize(0,0); +} + +bool VDPAUContext::SetDeinterlacer(const QString &deint) +{ + deinterlacer = QDeepCopy(deint); + return true; +} + +bool VDPAUContext::SetDeinterlacing(bool interlaced) +{ + if (!deintAvail) + return false; + + if (!deinterlacer.contains("vdpau")) + interlaced = false; + + VdpStatus vdp_st; + bool ok = interlaced; + + VdpVideoMixerFeature features[] = { + VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL, + VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL_SPATIAL, + }; + + VdpBool temporal = false; + VdpBool spatial = false; + if (deinterlacer.contains("basic")) + { + temporal = interlaced; + } + else if (deinterlacer.contains("advanced")) + { + temporal = interlaced; + spatial = interlaced; + } + + const VdpBool feature_values[] = { + temporal, + spatial, + }; + + // the following call generates a VDPAU error when both temporal + // and spatial are false (i.e. when disabling deinterlacing) + vdp_st = vdp_video_mixer_set_feature_enables( + videoMixer, + ARSIZE(features), + features, + feature_values + ); + CHECK_ST + + deinterlacing = (interlaced & ok); + needDeintRefs = false; + if (!deinterlacing) + { + ClearReferenceFrames(); + } + else + { + if (deinterlacer.contains("advanced") || + deinterlacer.contains("basic")) + needDeintRefs = true; + } + return deinterlacing; +} + +bool VDPAUContext::UpdateReferenceFrames(VideoFrame *frame) +{ + if (frame->frameNumber == currentFrameNum) + return false; + + currentFrameNum = frame->frameNumber; + + if (vdpauDecode) + { + while (referenceFrames.size() > (NUM_REFERENCE_FRAMES - 1)) + referenceFrames.pop_front(); + referenceFrames.push_back(frame); + } + + return true; +} + +bool VDPAUContext::IsBeingUsed(VideoFrame *frame) +{ + if (!frame || !vdpauDecode) + return false; + + return referenceFrames.contains(frame); +} + +bool VDPAUContext::CheckCodecSupported(MythCodecID myth_codec_id) +{ + bool ok = 
true; + + Display *disp = MythXOpenDisplay(); + if (!disp) + return false; + + int screen; + X11S(screen = DefaultScreen(disp)); + + VdpDevice device; + VdpGetProcAddress * vdp_proc_address; + VdpStatus vdp_st; + VdpGetErrorString * vdp_get_error_string; + vdp_get_error_string = &dummy_get_error_string; + + if (ok) + { + vdp_st = vdp_device_create_x11( + disp, + screen, + &device, + &vdp_proc_address + ); + CHECK_ST + } + + VdpDecoderQueryCapabilities * decoder_query; + VdpDeviceDestroy * device_destroy; + + if (ok) + { + vdp_st = vdp_proc_address( + device, + VDP_FUNC_ID_DECODER_QUERY_CAPABILITIES, + (void **)&decoder_query + ); + CHECK_ST + } + + if (ok) + { + vdp_st = vdp_proc_address( + device, + VDP_FUNC_ID_DEVICE_DESTROY, + (void **)&device_destroy + ); + CHECK_ST + } + + if (ok) + { + int support = 0; + VdpBool supported; + // not checked yet + uint level, refs, width, height; + switch (myth_codec_id) + { + case kCodec_MPEG1_VDPAU: + case kCodec_MPEG2_VDPAU: + vdp_st = decoder_query( + device, + VDP_DECODER_PROFILE_MPEG1, + &supported, + &level, &refs, &width, &height); + CHECK_ST + support += supported; + vdp_st = decoder_query( + device, + VDP_DECODER_PROFILE_MPEG2_SIMPLE, + &supported, + &level, &refs, &width, &height); + CHECK_ST + support += supported; + vdp_st = decoder_query( + device, + VDP_DECODER_PROFILE_MPEG2_MAIN, + &supported, + &level, &refs, &width, &height); + CHECK_ST + support += supported; + break; + + case kCodec_H264_VDPAU: + vdp_st = decoder_query( + device, + VDP_DECODER_PROFILE_H264_BASELINE, + &supported, + &level, &refs, &width, &height); + CHECK_ST + support += supported; + vdp_st = decoder_query( + device, + VDP_DECODER_PROFILE_H264_MAIN, + &supported, + &level, &refs, &width, &height); + CHECK_ST + support += supported; + vdp_st = decoder_query( + device, + VDP_DECODER_PROFILE_H264_HIGH, + &supported, + &level, &refs, &width, &height); + CHECK_ST + support += supported; + break; + + case kCodec_VC1_VDPAU: + // is this correct? 
(WMV3 == VC1) + case kCodec_WMV3_VDPAU: + vdp_st = decoder_query( + device, + VDP_DECODER_PROFILE_VC1_SIMPLE, + &supported, + &level, &refs, &width, &height); + CHECK_ST + support += supported; + vdp_st = decoder_query( + device, + VDP_DECODER_PROFILE_VC1_MAIN, + &supported, + &level, &refs, &width, &height); + CHECK_ST + support += supported; + vdp_st = decoder_query( + device, + VDP_DECODER_PROFILE_VC1_ADVANCED, + &supported, + &level, &refs, &width, &height); + CHECK_ST + support += supported; + break; + + default: + ok = false; + } + ok = (ok && (support > 0)); + if (ok && support != 3) + { + VERBOSE(VB_IMPORTANT, + QString("VDPAU WARNING: %1 GPU decode not fully supported" + " - playback may fail.") + .arg(toString(myth_codec_id))); + } + else if (!support) + { + VERBOSE(VB_PLAYBACK, LOC + + QString("%1 GPU decode not supported") + .arg(toString(myth_codec_id))); + } + } + + // tidy up + if (device_destroy && device) + device_destroy(device); + + if (disp) + X11S(XCloseDisplay(disp)); + + return ok; +} + +PictureAttributeSupported +VDPAUContext::GetSupportedPictureAttributes(void) const +{ + return (!useColorControl) ? 
+ kPictureAttributeSupported_None : + (PictureAttributeSupported) + (kPictureAttributeSupported_Brightness | + kPictureAttributeSupported_Contrast | + kPictureAttributeSupported_Colour | + kPictureAttributeSupported_Hue); +} + +int VDPAUContext::SetPictureAttribute( + PictureAttribute attribute, int newValue) +{ + if (!useColorControl) + return -1; + + int ret = -1; + float new_val; + switch (attribute) + { + case kPictureAttribute_Brightness: + ret = newValue; + proCamp.brightness = (newValue * 0.02f) - 1.0f; + break; + case kPictureAttribute_Contrast: + ret = newValue; + proCamp.contrast = (newValue * 0.02f); + break; + case kPictureAttribute_Colour: + ret = newValue; + proCamp.saturation = (newValue * 0.02f); + break; + case kPictureAttribute_Hue: + ret = newValue; + new_val = (newValue * 0.062831853f); + if (new_val > 3.14159265f) + new_val -= 6.2831853f; + proCamp.hue = new_val; + break; + default: + break; + } + + if (ret != -1) + SetPictureAttributes(); + + return ret; +} +bool VDPAUContext::InitColorControl(void) +{ + bool ok = true; + VdpStatus vdp_st; + + proCamp.struct_version = VDP_PROCAMP_VERSION; + proCamp.brightness = 0.0; + proCamp.contrast = 1.0; + proCamp.saturation = 1.0; + proCamp.hue = 0.0; + + VdpBool supported; + vdp_st = vdp_video_mixer_query_attribute_support( + vdp_device, + VDP_VIDEO_MIXER_ATTRIBUTE_CSC_MATRIX, + &supported + ); + CHECK_ST + ok &= supported; + return ok; +} + +bool VDPAUContext::SetPictureAttributes(void) +{ + bool ok = true; + VdpStatus vdp_st; + + if (!videoMixer || !useColorControl) + return false; + + vdp_st = vdp_generate_csc_matrix( + &proCamp, + VDP_COLOR_STANDARD_ITUR_BT_601, // detect? 
+ &cscMatrix + ); + CHECK_ST + + VdpVideoMixerAttribute attributes[] = { + VDP_VIDEO_MIXER_ATTRIBUTE_CSC_MATRIX + }; + void const * attribute_values[] = { &cscMatrix }; + + if (ok) + { + vdp_st = vdp_video_mixer_set_attribute_values( + videoMixer, + ARSIZE(attributes), + attributes, + attribute_values + ); + CHECK_ST + } + + return ok; +} + +void VDPAUContext::ClearScreen(void) +{ + VdpStatus vdp_st; + bool ok = true; + + VdpRect srcRect; + srcRect.x0 = 0; + srcRect.y0 = 0; + srcRect.x1 = 1; + srcRect.y1 = 1; + + outputSurface = outputSurfaces[surfaceNum]; + vdp_st = vdp_video_mixer_render( + videoMixer, + VDP_INVALID_HANDLE, + NULL, + VDP_VIDEO_MIXER_PICTURE_STRUCTURE_FRAME, + 0, + NULL, + videoSurfaces[0], + 0, + NULL, + &srcRect, + outputSurface, + &outRect, + &outRect, + 0, + NULL); + CHECK_ST + + DisplayNextFrame(); +} + +void VDPAUContext::DeinitPip(void) +{ + pipFrameSize = QSize(0,0); + pipReady = 0; + + if (pipVideoSurface) + { + vdp_video_surface_destroy(pipVideoSurface); + pipVideoSurface = 0; + } + + if (pipOutputSurface) + { + vdp_output_surface_destroy(pipOutputSurface); + pipOutputSurface = 0; + } + + if (pipVideoMixer) + { + vdp_video_mixer_destroy(pipVideoMixer); + pipVideoMixer = 0; + } + + if (pipAlpha) + { + vdp_bitmap_surface_destroy(pipAlpha); + pipAlpha = 0; + } +} + +bool VDPAUContext::InitPiP(QSize vid_size) +{ + // TODO capability check + // but should just fail gracefully anyway + bool ok = true; + VdpStatus vdp_st; + + pipFrameSize = vid_size; + + vdp_st = vdp_video_surface_create( + vdp_device, + vdp_chroma_type, + vid_size.width(), + vid_size.height(), + &pipVideoSurface + ); + CHECK_ST + + if (ok) + { + vdp_st = vdp_output_surface_create( + vdp_device, + VDP_RGBA_FORMAT_B8G8R8A8, + vid_size.width(), + vid_size.height(), + &pipOutputSurface + ); + CHECK_ST + } + + if (ok) + { + vdp_st = vdp_bitmap_surface_create( + vdp_device, + VDP_RGBA_FORMAT_A8, + vid_size.width(), + vid_size.height(), + false, + &pipAlpha + ); + CHECK_ST + } + + if 
(ok) + { + unsigned char *alpha = new unsigned char[vid_size.width() * vid_size.height()]; + void const * alpha_ptr[] = {alpha}; + if (alpha) + { + memset(alpha, 255, vid_size.width() * vid_size.height()); + uint32_t pitch[1] = {vid_size.width()}; + vdp_st = vdp_bitmap_surface_put_bits_native( + pipAlpha, + alpha_ptr, + pitch, + NULL + ); + CHECK_ST + delete [] alpha; + } + else + ok = false; + } + + if (ok) + { + int width = vid_size.width(); + int height = vid_size.height(); + VdpVideoMixerParameter parameters[] = { + VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_WIDTH, + VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_HEIGHT, + VDP_VIDEO_MIXER_PARAMETER_CHROMA_TYPE + }; + + void const * parameter_values[] = { + &width, + &height, + &vdp_chroma_type + }; + + vdp_st = vdp_video_mixer_create( + vdp_device, + 0, + 0, + ARSIZE(parameters), + parameters, + parameter_values, + &pipVideoMixer + ); + CHECK_ST + VERBOSE(VB_PLAYBACK, LOC + QString("Created VDPAU PiP (%1x%2)") + .arg(width).arg(height)); + } + + pipLayer.struct_version = VDP_LAYER_VERSION; + pipLayer.source_surface = pipOutputSurface; + pipLayer.source_rect = NULL; + pipLayer.destination_rect = &pipPosition; + + return ok; +} + +bool VDPAUContext::ShowPiP(VideoFrame * frame, QRect position) +{ + if (!frame) + return false; + + bool ok = true; + VdpStatus vdp_st; + + if (frame->width != pipFrameSize.width() || + frame->height != pipFrameSize.height()) + { + DeinitPip(); + ok = InitPiP(QSize(frame->width, frame->height)); + } + + if (!ok) + return ok; + + uint32_t pitches[] = { + frame->pitches[0], + frame->pitches[2], + frame->pitches[1] + }; + void* const planes[] = { + frame->buf, + frame->buf + frame->offsets[2], + frame->buf + frame->offsets[1] + }; + vdp_st = vdp_video_surface_put_bits_y_cb_cr( + pipVideoSurface, + VDP_YCBCR_FORMAT_YV12, + planes, + pitches); + CHECK_ST; + + VdpRect pip_rect; + pip_rect.x0 = 0; + pip_rect.y0 = 0; + pip_rect.x1 = pipFrameSize.width(); + pip_rect.y1 = pipFrameSize.height(); + if (ok) + 
{ + vdp_st = vdp_video_mixer_render( + pipVideoMixer, + VDP_INVALID_HANDLE, + NULL, + VDP_VIDEO_MIXER_PICTURE_STRUCTURE_FRAME, + 0, + NULL, + pipVideoSurface, + 0, + NULL, + NULL, + pipOutputSurface, + NULL, + NULL, + 0, + NULL + ); + CHECK_ST + } + + if (ok) + { + vdp_st = vdp_output_surface_render_bitmap_surface( + pipOutputSurface, + NULL, + pipAlpha, + NULL, + NULL, + &osd_blend, + 0 + ); + CHECK_ST + } + + if (ok) + { + pipReady = 2; // for double rate deint + pipPosition.x0 = position.left(); + pipPosition.y0 = position.top(); + pipPosition.x1 = position.left() + position.width(); + pipPosition.y1 = position.top() + position.height(); + } + + return ok; +} + diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/util-vdpau.h mythtv/libs/libmythtv/util-vdpau.h --- mythtv.ori/libs/libmythtv/util-vdpau.h 1970-01-01 10:00:00.000000000 +1000 +++ mythtv/libs/libmythtv/util-vdpau.h 2009-02-10 14:02:47.000000000 +1100 @@ -0,0 +1,197 @@ +#ifndef UTIL_VDPAU_H_ +#define UTIL_VDPAU_H_ + +extern "C" { +#include "../libavcodec/vdpau_render.h" +} + +#include "videobuffers.h" + +class VDPAUContext +{ + public: + VDPAUContext(); + ~VDPAUContext(); + + bool Init(Display *disp, int screen, Window win, + QSize screen_size, bool color_control, + MythCodecID mcodecid); + void Deinit(void); + bool IsErrored(void) { return errored; } + void SetErrored(void) { errored = true; } + + bool InitBuffers(int width, int height, int numbufs, + LetterBoxColour letterbox_colour); + void FreeBuffers(void); + void *GetRenderData(int i) + { if (i < numSurfaces && i >= 0) return (void*)&(surface_render[i]); + return NULL; + } + int GetNumBufs(void) { return numSurfaces; } + + bool InitOutput(QSize size); + void FreeOutput(void); + + void Decode(VideoFrame *frame); + + void PrepareVideo(VideoFrame *frame, QRect video_rect, + QRect display_video_rect, + QSize screen_size, FrameScanType scan, + bool pause_frame); + void DisplayNextFrame(void); + void SetNextFrameDisplayTimeOffset(int delayus); + bool 
InitOSD(QSize size); + void UpdateOSD(void* const planes[3], QSize size, + void* const alpha[1]); + void DisableOSD(void) { osdReady = false; } + void DeinitOSD(void); + + bool SetDeinterlacer(const QString &deint); + bool SetDeinterlacing(bool interlaced); + QString GetDeinterlacer(void) const + { return deinterlacer; } + bool IsBeingUsed(VideoFrame * frame); + void ClearReferenceFrames(void) { referenceFrames.clear(); } + + static bool CheckCodecSupported(MythCodecID myth_codec_id); + PictureAttributeSupported GetSupportedPictureAttributes(void) const; + int SetPictureAttribute(PictureAttribute attributeType, int newValue); + + bool ShowPiP(VideoFrame * frame, QRect position); + + private: + bool InitProcs(Display *disp, int screen); + void DeinitProcs(void); + void ClearScreen(void); + + bool InitFlipQueue(Window win); + void DeinitFlipQueue(void); + + void AddOutputSurfaces(void); + bool UpdateReferenceFrames(VideoFrame *frame); + bool InitColorControl(void); + bool SetPictureAttributes(void); + + bool InitPiP(QSize vid_size); + void DeinitPip(void); + + int nextframedelay; + VdpTime lastframetime; + + int pix_fmt; + + uint maxVideoWidth; + uint maxVideoHeight; + VdpVideoSurface *videoSurfaces; + vdpau_render_state_t *surface_render; + int checkVideoSurfaces; + int numSurfaces; + + vector outputSurfaces; + VdpVideoSurface videoSurface; + VdpOutputSurface outputSurface; + bool checkOutputSurfaces; + QSize outputSize; + + VdpDecoder decoder; + uint32_t maxReferences; + VdpVideoMixer videoMixer; + + VdpRect outRect; + VdpRect outRectVid; + + int surfaceNum; + + VdpVideoSurface osdVideoSurface; + VdpOutputSurface osdOutputSurface; + VdpVideoMixer osdVideoMixer; + VdpBitmapSurface osdAlpha; + VdpLayer osdLayer; + VdpRect osdRect; + bool osdReady; + QSize osdSize; + + bool deintAvail; + QString deinterlacer; + bool deinterlacing; + long long currentFrameNum; + frame_queue_t referenceFrames; + bool needDeintRefs; + + bool useColorControl; + VdpCSCMatrix cscMatrix; + 
VdpProcamp proCamp; + + QSize pipFrameSize; + VdpLayer pipLayer; + VdpVideoSurface pipVideoSurface; + VdpOutputSurface pipOutputSurface; + VdpVideoMixer pipVideoMixer; + int pipReady; + VdpRect pipPosition; + VdpBitmapSurface pipAlpha; + + VdpPresentationQueueTarget vdp_flip_target; + VdpPresentationQueue vdp_flip_queue; + + bool vdpauDecode; + + VdpDevice vdp_device; + bool errored; + + VdpGetProcAddress * vdp_get_proc_address; + VdpDeviceDestroy * vdp_device_destroy; + VdpGetErrorString * vdp_get_error_string; + VdpGetApiVersion * vdp_get_api_version; + VdpGetInformationString * vdp_get_information_string; + + VdpVideoSurfaceCreate * vdp_video_surface_create; + VdpVideoSurfaceDestroy * vdp_video_surface_destroy; + VdpVideoSurfaceGetBitsYCbCr * vdp_video_surface_put_bits_y_cb_cr; + VdpVideoSurfacePutBitsYCbCr * vdp_video_surface_get_bits_y_cb_cr; + VdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities * + vdp_video_surface_query_get_put_bits_y_cb_cr_capabilities; + VdpVideoSurfaceQueryCapabilities * vdp_video_surface_query_capabilities; + + VdpOutputSurfacePutBitsYCbCr * vdp_output_surface_put_bits_y_cb_cr; + VdpOutputSurfacePutBitsNative * vdp_output_surface_put_bits_native; + VdpOutputSurfaceCreate * vdp_output_surface_create; + VdpOutputSurfaceDestroy * vdp_output_surface_destroy; + VdpOutputSurfaceRenderBitmapSurface * vdp_output_surface_render_bitmap_surface; + VdpOutputSurfaceQueryCapabilities * vdp_output_surface_query_capabilities; + + /* videoMixer puts videoSurface data to displayble ouputSurface. 
*/ + VdpVideoMixerCreate * vdp_video_mixer_create; + VdpVideoMixerSetFeatureEnables * vdp_video_mixer_set_feature_enables; + VdpVideoMixerDestroy * vdp_video_mixer_destroy; + VdpVideoMixerRender * vdp_video_mixer_render; + VdpVideoMixerSetAttributeValues * vdp_video_mixer_set_attribute_values; + VdpVideoMixerQueryFeatureSupport * vdp_video_mixer_query_feature_support; + VdpVideoMixerQueryAttributeSupport * vdp_video_mixer_query_attribute_support; + VdpVideoMixerQueryParameterSupport * vdp_video_mixer_query_parameter_support; + VdpGenerateCSCMatrix * vdp_generate_csc_matrix; + + VdpPresentationQueueTargetDestroy * vdp_presentation_queue_target_destroy; + VdpPresentationQueueCreate * vdp_presentation_queue_create; + VdpPresentationQueueDestroy * vdp_presentation_queue_destroy; + VdpPresentationQueueDisplay * vdp_presentation_queue_display; + VdpPresentationQueueBlockUntilSurfaceIdle * vdp_presentation_queue_block_until_surface_idle; + VdpPresentationQueueTargetCreateX11 * vdp_presentation_queue_target_create_x11; + VdpPresentationQueueQuerySurfaceStatus * vdp_presentation_queue_query_surface_status; + VdpPresentationQueueGetTime * vdp_presentation_queue_get_time; + VdpPresentationQueueSetBackgroundColor * vdp_presentation_queue_set_background_color; + + VdpDecoderCreate * vdp_decoder_create; + VdpDecoderDestroy * vdp_decoder_destroy; + VdpDecoderRender * vdp_decoder_render; + + VdpBitmapSurfaceCreate * vdp_bitmap_surface_create; + VdpBitmapSurfaceDestroy * vdp_bitmap_surface_destroy; + VdpBitmapSurfacePutBitsNative * vdp_bitmap_surface_put_bits_native; + VdpBitmapSurfaceQueryCapabilities * vdp_bitmap_surface_query_capabilities; + + VdpPreemptionCallbackRegister * vdp_preemption_callback_register; +}; + +#endif + diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/videobuffers.cpp mythtv/libs/libmythtv/videobuffers.cpp --- mythtv.ori/libs/libmythtv/videobuffers.cpp 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libmythtv/videobuffers.cpp 2009-02-10 
14:01:34.000000000 +1100 @@ -10,10 +10,14 @@ #include "fourcc.h" #include "compat.h" -#ifdef USING_XVMC +#if defined(USING_XVMC) || defined(USING_VDPAU) #include "videoout_xv.h" // for xvmc stuff #endif +#ifdef USING_VDPAU +#include "util-vdpau.h" +#endif + #define DEBUG_FRAME_LOCKS 0 #define TRY_LOCK_SPINS 100 @@ -1157,6 +1161,36 @@ return ok; } +#ifdef USING_VDPAU +bool VideoBuffers::CreateBuffers(int width, int height, VDPAUContext *ctx) +{ + static unsigned char *ffmpeg_vdpau_hack = (unsigned char*) + "avlib should not use this private data in VDPAU mode."; + + if (!ctx) + return false; + + if ((uint)ctx->GetNumBufs() != allocSize()) + { + VERBOSE(VB_IMPORTANT, QString("VideoBuffers::CreateBuffers") + + QString("VDPAUContext buffer count %1 does not agree " + "with the VideoBuffers buffer countr %2") + .arg(ctx->GetNumBufs()).arg(allocSize())); + return false; + } + + for (uint i = 0; i < allocSize(); i++) + { + init(&buffers[i], + FMT_VDPAU, (unsigned char*)ctx->GetRenderData(i), + width, height, -1, 0); + buffers[i].priv[0] = ffmpeg_vdpau_hack; + buffers[i].priv[1] = ffmpeg_vdpau_hack; + } + return true; +} +#endif + #ifdef USING_XVMC bool VideoBuffers::CreateBuffers(int width, int height, Display *disp, diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/videobuffers.h mythtv/libs/libmythtv/videobuffers.h --- mythtv.ori/libs/libmythtv/videobuffers.h 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libmythtv/videobuffers.h 2009-02-10 14:01:34.000000000 +1100 @@ -17,6 +17,10 @@ #include #endif // USING_XVMC +#ifdef USING_VDPAU +class VDPAUContext; +#endif + typedef MythDeque frame_queue_t; typedef vector frame_vector_t; typedef map frame_map_t; @@ -151,6 +155,10 @@ vector surfs); #endif +#ifdef USING_VDPAU + bool CreateBuffers(int width, int height, VDPAUContext *ctx); +#endif + QString GetStatus(int n=-1) const; // debugging method private: frame_queue_t *queue(BufferType type); diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/videodisplayprofile.cpp 
mythtv/libs/libmythtv/videodisplayprofile.cpp --- mythtv.ori/libs/libmythtv/videodisplayprofile.cpp 2009-01-23 16:38:27.000000000 +1100 +++ mythtv/libs/libmythtv/videodisplayprofile.cpp 2009-02-10 14:01:34.000000000 +1100 @@ -570,8 +570,9 @@ list += "xvmc-vld"; list += "macaccel"; list += "ivtv"; + list += "vdpau"; - return list; + return list; } QStringList VideoDisplayProfile::GetDecoderNames(void) @@ -600,6 +601,7 @@ dec_name["xvmc-vld"] = QObject::tr("VIA XvMC"); dec_name["macaccel"] = QObject::tr("Mac hardware acceleration"); dec_name["ivtv"] = QObject::tr("PVR-350 decoder"); + dec_name["vdpau"] = QObject::tr("NVidia VDPAU acceleration"); } pref_map_t::const_iterator it = dec_name.find(decoder); @@ -650,6 +652,11 @@ "high quality playback. This requires that the ivtv-fb " "kernel module is also loaded and configured properly."); + if (decoder == "vdpau") + msg += QObject::tr( + "VDPAU will attempt to use the graphics hardware to " + "accelerate video decoding and playback."); + return msg; } @@ -689,6 +696,24 @@ return QObject::tr("Linear blend (2x, HW)"); else if ("opengldoubleratefieldorder" == short_name) return QObject::tr("Interlaced (2x, Hw)"); + else if ("opengldoublerateyadif" == short_name) + return QObject::tr("Yadif (2x, Hw)"); + else if ("openglyadif" == short_name) + return QObject::tr("Yadif (Hw)"); + + else if ("vdpauonefield" == short_name) + return QObject::tr("One Field (1x, Hw)"); + else if ("vdpaubobdeint" == short_name) + return QObject::tr("Bob (2x, Hw)"); + else if ("vdpaubasic" == short_name) + return QObject::tr("Temporal (1x, Hw)"); + else if ("vdpaubasicdoublerate" == short_name) + return QObject::tr("Temporal (2x, Hw)"); + else if ("vdpauadvanced" == short_name) + return QObject::tr("Advanced (1x, Hw)"); + else if ("vdpauadvanceddoublerate" == short_name) + return QObject::tr("Advanced (2x, Hw)"); + return ""; } @@ -1156,6 +1181,12 @@ "additional resources."); } + if (renderer == "vdpau") + { + msg = QObject::tr( + "This is the only 
video renderer for NVidia VDPAU decoding."); + } + return msg; } @@ -1245,6 +1276,8 @@ msg = kLinearBlendMsg + " " + kUsingOpenGL; else if (deint == "openglkerneldeint") msg = kKernelMsg + " " + kUsingOpenGL; + else if (deint == "openglyadif") + msg = kYadifMsg + " " + kUsingOpenGL; else if (deint == "opengldoubleratelinearblend") msg = kLinearBlendMsg + " " + kUsingOpenGLWorkaround; else if (deint == "opengldoublerateonefield") @@ -1261,6 +1294,8 @@ msg = kYadifMsg; else if (deint == "yadifdoubleprocessdeint") msg = kYadifMsg + " " + kDoubleRateMsg; + else if (deint == "opengldoublerateyadif") + msg = kYadifMsg + " " + kUsingOpenGLWorkaround; else msg = QObject::tr("'%1' has not been documented yet.").arg(deint); @@ -1397,6 +1432,7 @@ "xvmc-vld" "macaccel" "ivtv" +"vdpau" // Video Renderers "null" @@ -1411,6 +1447,7 @@ "quartz-accel" "ivtv" "opengl" +"vdpau" // OSD Renderers "chromakey" @@ -1420,6 +1457,7 @@ "opengl" "opengl2" "opengl3" +"vdpau" // deinterlacers "none" @@ -1435,10 +1473,18 @@ "openglkerneldeint" "openglonefield" "openglbobdeint" +"openglyadif" "opengldoubleratelinearblend" "opengldoublerateonefield" "opengldoubleratekerneldeint" "opengldoubleratefieldorder" +"opengldoublerateyadif" +"vdpauonefield" +"vdpaubobdeint" +"vdpaubasic" +"vdpauadvanced" +"vdpaubasicdoublerate" +"vdpauadvanceddoublerate" */ void VideoDisplayProfile::init_statics(void) @@ -1495,6 +1541,17 @@ safe_deint["opengl"] += "opengldoublerateonefield"; safe_deint["opengl"] += "opengldoubleratekerneldeint"; safe_deint["opengl"] += "opengldoubleratefieldorder"; + safe_deint["opengl"] += "opengldoublerateyadif"; + safe_deint["opengl"] += "openglyadif"; + + + safe_deint["vdpau"] += "none"; + safe_deint["vdpau"] += "vdpauonefield"; + safe_deint["vdpau"] += "vdpaubobdeint"; + safe_deint["vdpau"] += "vdpaubasic"; + safe_deint["vdpau"] += "vdpauadvanced"; + safe_deint["vdpau"] += "vdpaubasicdoublerate"; + safe_deint["vdpau"] += "vdpauadvanceddoublerate"; safe_osd["xv-blit"] += "softblend"; 
safe_osd["xvmc-blit"] += "chromakey"; @@ -1503,6 +1560,7 @@ safe_osd["ivtv"] += "ivtv"; safe_osd["opengl"] += "opengl2"; safe_osd["quartz-accel"]+= "opengl3"; + safe_osd["vdpau"] += "vdpau"; // These video renderers do not support deinterlacing in MythTV safe_deint["quartz-accel"] += "none"; @@ -1531,10 +1589,12 @@ safe_renderer["xvmc-vld"] += "xvmc-blit"; safe_renderer["dummy"] += "xvmc-opengl"; safe_renderer["xvmc"] += "xvmc-opengl"; + safe_renderer["ffmpeg"] += "vdpau"; safe_renderer["dummy"] += "quartz-accel"; safe_renderer["macaccel"] += "quartz-accel"; safe_renderer["ivtv"] += "ivtv"; + safe_renderer["vdpau"] += "vdpau"; safe_renderer_priority["null"] = 10; safe_renderer_priority["xlib"] = 20; @@ -1542,6 +1602,7 @@ safe_renderer_priority["xv-blit"] = 90; safe_renderer_priority["xvmc-blit"] = 110; safe_renderer_priority["xvmc-opengl"] = 100; + safe_renderer_priority["vdpau"] = 120; safe_renderer_priority["directfb"] = 60; safe_renderer_priority["directx"] = 50; safe_renderer_priority["direct3d"] = 55; @@ -1559,4 +1620,5 @@ safe_equiv_dec["xvmc-vld"] += "dummy"; safe_equiv_dec["macaccel"] += "dummy"; safe_equiv_dec["ivtv"] += "dummy"; + safe_equiv_dec["vdpau"] += "dummy"; } diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/videoout_xv.cpp mythtv/libs/libmythtv/videoout_xv.cpp --- mythtv.ori/libs/libmythtv/videoout_xv.cpp 2009-01-23 16:38:27.000000000 +1100 +++ mythtv/libs/libmythtv/videoout_xv.cpp 2009-02-11 15:57:17.000000000 +1100 @@ -38,13 +38,18 @@ #include "xvmctextures.h" // MythTV General headers -#include "mythconfig.h" #include "mythcontext.h" +#include "mythverbose.h" #include "filtermanager.h" #include "videodisplayprofile.h" #define IGNORE_TV_PLAY_REC #include "tv.h" #include "fourcc.h" +#include "mythmainwindow.h" + +#ifdef USING_VDPAU +#include "util-vdpau.h" +#endif // MythTV OpenGL headers #include "openglcontext.h" @@ -71,18 +76,25 @@ #define XVMC_CHROMA_FORMAT_420 0x00000001 #endif -static QStringList allowed_video_renderers(MythCodecID 
codec_id, - Display *XJ_disp); +#define NUM_VDPAU_BUFFERS 17 + +static QStringList allowed_video_renderers( + MythCodecID codec_id, Display *display, int screen, Window curwin); static void SetFromEnv(bool &useXvVLD, bool &useXvIDCT, bool &useXvMC, - bool &useXV, bool &useShm, bool &useOpenGL); -static void SetFromHW(Display *d, bool &useXvMC, bool &useXV, - bool &useShm, bool &useXvMCOpenGL, bool &useOpenGL); + bool &useXV, bool &useShm, bool &useOpenGL, + bool &useVDPAU); +static void SetFromHW(Display *d, int screen, Window curwin, + bool &useXvMC, bool &useXV, + bool &useShm, bool &useXvMCOpenGL, + bool &useOpenGL, bool &useVDPAU, + MythCodecID myth_codec_id); static int calc_hue_base(const QString &adaptor_name); const char *vr_str[] = { - "unknown", "xlib", "xshm", "opengl", "xv-blit", "xvmc", "xvmc", "xvmc", + "unknown", "xlib", "xshm", "opengl", "xv-blit", "vdpau", "xvmc", "xvmc", + "xvmc", }; /** \class VideoOutputXv @@ -116,11 +128,16 @@ xvmc_osd_lock(false), xvmc_tex(NULL), +#ifdef USING_VDPAU + vdpau(NULL), +#endif + vdpau_use_osd(false), vdpau_use_pip(true), + xv_port(-1), xv_hue_base(0), xv_colorkey(0), xv_draw_colorkey(false), xv_chroma(0), - gl_context_lock(false), gl_context(NULL), + gl_context_lock(true), gl_context(NULL), gl_videochain(NULL), gl_pipchain(NULL), gl_osdchain(NULL), @@ -215,9 +232,9 @@ needrepaint = true; } - if (gl_videochain) + if (gl_videochain && gl_context) { - QMutexLocker locker(&gl_context_lock); + OpenGLContextLocker ctx_lock(gl_context); gl_videochain->SetVideoRect(display_video_rect, video_rect); } } @@ -237,6 +254,8 @@ bool cid_changed = (myth_codec_id != av_codec_id); bool res_changed = input_size != video_disp_dim; bool asp_changed = aspect != video_aspect; + bool gpu_deint = (VideoOutputSubType() == OpenGL || + VideoOutputSubType() == XVideoVDPAU); VideoOutput::InputChanged(input_size, aspect, av_codec_id, codec_private); @@ -251,12 +270,14 @@ bool ok = true; - DeleteBuffers(VideoOutputSubType(), - cid_changed || 
(OpenGL == VideoOutputSubType())); + bool delete_pause_frame = cid_changed || (OpenGL == VideoOutputSubType()); + DeleteBuffers(VideoOutputSubType(), delete_pause_frame); + ResizeForVideo((uint) video_disp_dim.width(), (uint) video_disp_dim.height()); - if (cid_changed && (OpenGL != VideoOutputSubType())) + if (cid_changed || + XVideoVDPAU == VideoOutputSubType() || OpenGL == VideoOutputSubType()) { myth_codec_id = av_codec_id; @@ -273,13 +294,9 @@ ok = InitSetupBuffers(); } - else if (OpenGL != VideoOutputSubType()) - ok = CreateBuffers(VideoOutputSubType()); - - if (OpenGL == VideoOutputSubType()) + else { - myth_codec_id = av_codec_id; - ok = InitSetupBuffers(); + ok = CreateBuffers(VideoOutputSubType()); } MoveResize(); @@ -290,6 +307,15 @@ "Failed to recreate buffers"); errored = true; } + else + { + if ((VideoOutputSubType() == OpenGL || + VideoOutputSubType() == XVideoVDPAU) || + gpu_deint) + { + BestDeint(); + } + } return ok; } @@ -302,8 +328,11 @@ QSize dvr2 = QSize(display_visible_rect.width() & ~0x3, display_visible_rect.height() & ~0x1); - if (!chroma_osd && !gl_use_osd_opengl2) - return VideoOutput::GetVisibleOSDBounds(visible_aspect, font_scaling, themeaspect); + if (!chroma_osd && !gl_use_osd_opengl2 && !vdpau_use_osd) + { + return VideoOutput::GetVisibleOSDBounds( + visible_aspect, font_scaling, themeaspect); + } float dispPixelAdj = 1.0f; if (dvr2.height() && dvr2.width()) @@ -319,7 +348,8 @@ QSize dvr2 = QSize(display_visible_rect.width() & ~0x3, display_visible_rect.height() & ~0x1); - QSize sz = (chroma_osd || gl_use_osd_opengl2) ? dvr2 : video_disp_dim; + QSize sz = (chroma_osd || gl_use_osd_opengl2 || vdpau_use_osd) ? 
+ dvr2 : video_disp_dim; return QRect(QPoint(0,0), sz); } @@ -854,18 +884,34 @@ */ bool VideoOutputXv::InitVideoBuffers(MythCodecID mcodecid, bool use_xv, bool use_shm, - bool use_opengl) + bool use_opengl, bool use_vdpau) { (void)mcodecid; bool done = false; + +#ifdef USING_VDPAU + if (((kCodec_VDPAU_BEGIN < mcodecid) && (mcodecid < kCodec_VDPAU_END)) || + mcodecid < kCodec_NORMAL_END) + { + if (use_vdpau) + { + vbuffers.Init(NUM_VDPAU_BUFFERS, false, 1, 4, 4, 1, false); + done = InitVDPAU(mcodecid); + if (!done) + vbuffers.Reset(); + } + } +#endif + // If use_xvmc try to create XvMC buffers #ifdef USING_XVMC - if (mcodecid > kCodec_NORMAL_END) + if (!done && (kCodec_STD_XVMC_BEGIN < mcodecid) && + (mcodecid < kCodec_VLD_END)) { // Create ffmpeg VideoFrames - bool vld, idct, mc; - myth2av_codecid(myth_codec_id, vld, idct, mc); + bool vld, idct, mc, vdpau; + myth2av_codecid(myth_codec_id, vld, idct, mc, vdpau); vbuffers.Init(xvmc_buf_attr->GetNumSurf(), false /* create an extra frame for pause? 
*/, @@ -883,6 +929,14 @@ } #endif // USING_XVMC + if (!done && mcodecid >= kCodec_NORMAL_END) + { + VERBOSE(VB_IMPORTANT, LOC_ERR + + QString("Failed to initialize buffers for codec %1") + .arg(toString(mcodecid))); + return false; + } + // Create ffmpeg VideoFrames if (!done) vbuffers.Init(31, true, 1, 12, 4, 2, false); @@ -918,33 +972,34 @@ #ifdef USING_OPENGL_VIDEO ok = gl_context; - gl_context_lock.lock(); + gl_context_lock.lock(); if (!ok) { - gl_context = new OpenGLContext(); + gl_context = new OpenGLContext(&gl_context_lock); ok = gl_context->Create( XJ_disp, XJ_win, XJ_screen_num, - display_visible_rect.size(), true); + display_visible_rect, + db_use_picture_controls); } + gl_context_lock.unlock(); + if (ok) { + OpenGLContextLocker ctx_lock(gl_context); gl_context->Show(); - gl_context->MakeCurrent(true); gl_videochain = new OpenGLVideo(); ok = gl_videochain->Init(gl_context, db_use_picture_controls, - true, video_dim, - display_visible_rect, - display_video_rect, video_rect, true); - gl_context->MakeCurrent(false); + video_dim, display_visible_rect, + display_video_rect, video_rect, true, + GetFilters()); } - gl_context_lock.unlock(); - if (ok) { + OpenGLContextLocker ctx_lock(gl_context); InstallXErrorHandler(XJ_disp); ok = CreateBuffers(OpenGL); @@ -963,23 +1018,6 @@ { video_output_subtype = OpenGL; allowpreviewepg = false; - - // ensure deinterlacing is re-enabled after input change - bool temp_deinterlacing = m_deinterlacing; - - if (!m_deintfiltername.isEmpty() && - !m_deintfiltername.contains("opengl")) - { - QMutexLocker locker(&gl_context_lock); - gl_videochain->SetSoftwareDeinterlacer(m_deintfiltername); - } - - SetDeinterlacingEnabled(true); - - if (!temp_deinterlacing) - { - SetDeinterlacingEnabled(false); - } } } @@ -1075,6 +1113,54 @@ #endif // USING_XVMC } +/** + * \fn VideoOutputXv::InitVDPAU(MythCodecID) + * Creates and initializes video buffers. + * + * \sideeffect sets video_output_subtype if it succeeds. 
+ * + * \return success or failure at creating any buffers. + */ +bool VideoOutputXv::InitVDPAU(MythCodecID mcodecid) +{ + (void)mcodecid; +#ifdef USING_VDPAU + vdpau = new VDPAUContext(); + + bool ok = vdpau->Init(XJ_disp, XJ_screen_num, XJ_curwin, + display_visible_rect.size(), + db_use_picture_controls, mcodecid); + if (!ok) + { + VERBOSE(VB_IMPORTANT, "Unable to init VDPAU"); + vdpau->Deinit(); + delete vdpau; + vdpau = NULL; + return ok; + } + + ok = CreateVDPAUBuffers(); + if (!ok) + { + VERBOSE(VB_IMPORTANT, "Unable to create VDPAU buffers"); + DeleteBuffers(XVideoVDPAU, false); + return ok; + } + else + { + VERBOSE(VB_PLAYBACK, LOC + + QString("Created VDPAU context (%1 decode)") + .arg((mcodecid < kCodec_NORMAL_END) ? "software" : "GPU")); + } + + video_output_subtype = XVideoVDPAU; + allowpreviewepg = false; + return ok; +#else // USING_VDPAU + return false; +#endif // USING_VDPAU +} + static bool has_format(XvImageFormatValues *formats, int format_cnt, int id) { for (int i = 0; i < format_cnt; i++) @@ -1271,32 +1357,42 @@ if (force_xv) return (MythCodecID)(kCodec_MPEG1 + (stream_type-1)); -#ifdef USING_XVMC + +#if defined(USING_XVMC) || defined(USING_VDPAU) VideoDisplayProfile vdp; vdp.SetInput(QSize(width, height)); QString dec = vdp.GetDecoder(); if ((dec == "libmpeg2") || (dec == "ffmpeg")) return (MythCodecID)(kCodec_MPEG1 + (stream_type-1)); - Display *disp = MythXOpenDisplay(); - // Disable features based on environment and DB values. bool use_xvmc_vld = false, use_xvmc_idct = false, use_xvmc = false; bool use_xv = true, use_shm = true, use_opengl = true; + bool use_vdpau = false; if (dec == "xvmc") use_xvmc_idct = use_xvmc = true; else if (dec == "xvmc-vld") use_xvmc_vld = use_xvmc = true; + else if (dec == "vdpau") + use_vdpau = true; SetFromEnv(use_xvmc_vld, use_xvmc_idct, use_xvmc, use_xv, - use_shm, use_opengl); + use_shm, use_opengl, use_vdpau); // Disable features based on hardware capabilities. 
bool use_xvmc_opengl = use_xvmc; - SetFromHW(disp, use_xvmc, use_xv, use_shm, use_xvmc_opengl, use_opengl); + Display *disp = MythXOpenDisplay(); + X11L; + int screen = DefaultScreen(disp); + Window root = DefaultRootWindow(disp); + X11U; + SetFromHW(disp, screen, root, use_xvmc, use_xv, use_shm, + use_xvmc_opengl, use_opengl, use_vdpau, + (MythCodecID)(kCodec_MPEG1_VDPAU + (stream_type-1))); MythCodecID ret = (MythCodecID)(kCodec_MPEG1 + (stream_type-1)); +#ifdef USING_XVMC if (use_xvmc_vld && XvMCSurfaceTypes::has(disp, XvVLD, stream_type, xvmc_chroma, width, height, osd_width, osd_height)) @@ -1319,11 +1415,9 @@ bool ok = true; if (test_surface && ret > kCodec_NORMAL_END) { - Window root; XvMCSurfaceInfo info; ok = false; - X11S(root = DefaultRootWindow(disp)); int port = GrabSuitableXvPort(disp, root, ret, width, height, xvmc_chroma, &info); if (port >= 0) @@ -1340,7 +1434,6 @@ X11U; } } - X11S(XCloseDisplay(disp)); X11S(ok |= cnt_open_xv_port() > 0); // also ok if we already opened port.. 
if (!ok) @@ -1357,17 +1450,34 @@ "\t\t\tvendor's XvMC library.\n"; #endif // USING_XVMCW VERBOSE(VB_IMPORTANT, msg); - ret = (MythCodecID)(kCodec_MPEG1 + (stream_type-1)); } +#endif // USING_XVMC + + X11S(XCloseDisplay(disp)); + +#ifdef USING_VDPAU + if (use_vdpau) + ret = (MythCodecID)(kCodec_MPEG1_VDPAU + (stream_type-1)); +#endif // USING_VDPAU return ret; -#else // if !USING_XVMC - return (MythCodecID)(kCodec_MPEG1 + (stream_type-1)); -#endif // !USING_XVMC +#endif // defined(USING_XVMC) || defined(USING_VDPAU) } bool VideoOutputXv::InitOSD(const QString &osd_renderer) { +#ifdef USING_VDPAU + if (osd_renderer == "vdpau" && vdpau) + { + vdpau_use_osd = true; + if (!vdpau->InitOSD(GetTotalOSDBounds().size())) + { + vdpau_use_osd = false; + VERBOSE(VB_IMPORTANT, LOC + "Init VDPAU osd failed."); + } + return vdpau_use_osd; + } +#endif if (osd_renderer == "opengl") { xvmc_tex = XvMCTextures::Create( @@ -1387,19 +1497,18 @@ return xvmc_tex; } - if (osd_renderer == "opengl2") + if (osd_renderer == "opengl2" && gl_context) { - QMutexLocker locker(&gl_context_lock); + OpenGLContextLocker ctx_lock(gl_context); gl_use_osd_opengl2 = true; - gl_context->MakeCurrent(true); - gl_osdchain = new OpenGLVideo(); if (!gl_osdchain->Init( - gl_context, false, true, + gl_context, db_use_picture_controls, GetTotalOSDBounds().size(), GetTotalOSDBounds(), display_visible_rect, - QRect(QPoint(0, 0), GetTotalOSDBounds().size()), false, true)) + QRect(QPoint(0, 0), GetTotalOSDBounds().size()), false, + GetFilters(), true)) { VERBOSE(VB_PLAYBACK, LOC_ERR + "InitOSD(): Failed to create OpenGL2 OSD"); @@ -1411,8 +1520,6 @@ { gl_osdchain->SetMasterViewport(gl_videochain->GetViewPort()); } - - gl_context->MakeCurrent(false); } if (osd_renderer == "chromakey") @@ -1502,7 +1609,8 @@ { // Figure out what video renderer to use db_vdisp_profile->SetInput(video_dim); - QStringList renderers = allowed_video_renderers(myth_codec_id, XJ_disp); + QStringList renderers = allowed_video_renderers( + 
myth_codec_id, XJ_disp, XJ_screen_num, XJ_curwin); QString renderer = QString::null; QString tmp = db_vdisp_profile->GetVideoRenderer(); @@ -1534,13 +1642,17 @@ bool use_xv = (renderer.left(2) == "xv"); bool use_shm = (renderer == "xshm"); bool use_opengl = (renderer == "opengl"); - bool ok = InitVideoBuffers(myth_codec_id, use_xv, use_shm, use_opengl); + bool use_vdpau = (renderer == "vdpau"); + bool ok = InitVideoBuffers(myth_codec_id, use_xv, use_shm, + use_opengl, use_vdpau); if (!ok) { use_xv = renderers.contains("xv-blit"); use_shm = renderers.contains("xshm"); use_opengl = renderers.contains("opengl"); - ok = InitVideoBuffers(myth_codec_id, use_xv, use_shm, use_opengl); + use_vdpau = renderers.contains("vdpau"); + ok = InitVideoBuffers(myth_codec_id, use_xv, use_shm, + use_opengl, use_vdpau); } XV_INIT_FATAL_ERROR_TEST(!ok, "Failed to get any video output"); @@ -1550,13 +1662,15 @@ InitOSD(osdrenderer); // Initialize chromakeying, if we need to - if (!xvmc_tex && video_output_subtype >= XVideo) + if (!xvmc_tex && video_output_subtype >= XVideo && + video_output_subtype != XVideoVDPAU) InitColorKey(true); // Check if we can actually use the OSD we want to use... 
if (!CheckOSDInit()) { - ok = InitVideoBuffers(myth_codec_id, use_xv, use_shm, use_opengl); + ok = InitVideoBuffers(myth_codec_id, use_xv, use_shm, + use_opengl, use_vdpau); XV_INIT_FATAL_ERROR_TEST(!ok, "Failed to get any video output (nCK)"); } @@ -1703,6 +1817,9 @@ // documented in videooutbase.cpp bool VideoOutputXv::SetDeinterlacingEnabled(bool enable) { + if (VideoOutputSubType() == XVideoVDPAU) + return SetDeinterlacingEnabledVDPAU(enable); + if (VideoOutputSubType() == OpenGL) return SetDeinterlacingEnabledOpenGL(enable); @@ -1714,6 +1831,9 @@ bool VideoOutputXv::SetupDeinterlace(bool interlaced, const QString& overridefilter) { + if (VideoOutputSubType() == XVideoVDPAU) + return SetupDeinterlaceVDPAU(interlaced, overridefilter); + if (VideoOutputSubType() == OpenGL) return SetupDeinterlaceOpenGL(interlaced, overridefilter); @@ -1722,16 +1842,64 @@ return deint; } +bool VideoOutputXv::SetDeinterlacingEnabledVDPAU(bool enable) +{ + (void)enable; +#ifdef USING_VDPAU + if (!vdpau) + return false; + + if (vdpau->GetDeinterlacer() != m_deintfiltername) + return SetupDeinterlace(enable); + + m_deinterlacing = vdpau->SetDeinterlacing(enable); +#endif // USING_VDPAU + return m_deinterlacing; +} + +bool VideoOutputXv::SetupDeinterlaceVDPAU( + bool interlaced, const QString &overridefilter) +{ + (void)interlaced; + (void)overridefilter; +#ifdef USING_VDPAU + // clear any software filters + if (m_deintFiltMan) + { + delete m_deintFiltMan; + m_deintFiltMan = NULL; + } + if (m_deintFilter) + { + delete m_deintFilter; + m_deintFilter = NULL; + } + + if (!vdpau) + return false; + + m_deintfiltername = db_vdisp_profile->GetFilteredDeint(overridefilter); + if (!m_deintfiltername.contains("vdpau")) + return false; + + vdpau->SetDeinterlacer(m_deintfiltername); + m_deinterlacing = vdpau->SetDeinterlacing(interlaced); +#endif// USING_VDPAU + return m_deinterlacing; +} + bool VideoOutputXv::SetDeinterlacingEnabledOpenGL(bool enable) { (void) enable; - if (!gl_videochain) + if 
(!gl_videochain || !gl_context) return false; if (enable && m_deinterlacing && (OpenGL != VideoOutputSubType())) return m_deinterlacing; + OpenGLContextLocker ctx_lock(gl_context); + if (enable) { if (m_deintfiltername == "") @@ -1744,9 +1912,7 @@ else if (!m_deintfiltername.contains("opengl")) { // make sure opengl deinterlacing is disabled - gl_context_lock.lock(); gl_videochain->SetDeinterlacing(false); - gl_context_lock.unlock(); if (!m_deintFiltMan || !m_deintFilter) return VideoOutput::SetupDeinterlace(enable); @@ -1754,10 +1920,7 @@ } if (gl_videochain) - { - QMutexLocker locker(&gl_context_lock); gl_videochain->SetDeinterlacing(enable); - } m_deinterlacing = enable; @@ -1770,13 +1933,16 @@ (void) interlaced; (void) overridefilter; + if (!gl_videochain || !gl_context) + return false; + + OpenGLContextLocker ctx_lock(gl_context); + m_deintfiltername = db_vdisp_profile->GetFilteredDeint(overridefilter); if (!m_deintfiltername.contains("opengl")) { - gl_context_lock.lock(); gl_videochain->SetDeinterlacing(false); - gl_context_lock.unlock(); gl_videochain->SetSoftwareDeinterlacer(QString::null); @@ -1806,8 +1972,6 @@ if (!gl_videochain) return false; - QMutexLocker locker(&gl_context_lock); - if (m_deinterlacing && !m_deintfiltername.isEmpty()) { if (gl_videochain->GetDeinterlacer() != m_deintfiltername) @@ -1835,7 +1999,7 @@ } /** - * \fn VideoOutput::NeedsDoubleFramerate() const + * \fn VideoOutput::ApproveDeintFilter(const QString&) const * Approves bobdeint filter for XVideo and XvMC surfaces, * rejects other filters for XvMC, and defers to * VideoOutput::ApproveDeintFilter(const QString&) @@ -1843,16 +2007,21 @@ * * \return whether current video output supports a specific filter. 
*/ -bool VideoOutputXv::ApproveDeintFilter(const QString& filtername) const +bool VideoOutputXv::ApproveDeintFilter(const QString &filtername) const { // TODO implement bobdeint for non-Xv[MC] VOSType vos = VideoOutputSubType(); - if (filtername == "bobdeint" && (vos >= XVideo || vos == OpenGL)) + + if (XVideoVDPAU == vos) + return filtername.contains("vdpau"); + + if ((OpenGL == vos) && filtername.contains("opengl")) return true; - else if (vos > XVideo) - return false; - else - return VideoOutput::ApproveDeintFilter(filtername); + + if (filtername == "bobdeint" && (vos >= OpenGL) && (XVideoVDPAU != vos)) + return true; + + return VideoOutput::ApproveDeintFilter(filtername); } XvMCContext* VideoOutputXv::CreateXvMCContext( @@ -1892,6 +2061,44 @@ #endif // !USING_XVMC } +bool VideoOutputXv::CreateVDPAUBuffers(void) +{ +#ifdef USING_VDPAU + if (!vdpau) + return false; + + if (!vdpau->InitBuffers(video_dim.width(), video_dim.height(), + NUM_VDPAU_BUFFERS, db_letterbox_colour)) + { + vdpau->FreeBuffers(); + return false; + } + + bool ok = false; + + if (myth_codec_id > kCodec_VDPAU_BEGIN && + myth_codec_id < kCodec_VDPAU_END) + { + ok = vbuffers.CreateBuffers(video_dim.width(), + video_dim.height(), vdpau); + } + else if (myth_codec_id < kCodec_NORMAL_END) + { + ok = vbuffers.CreateBuffers(video_dim.width(), video_dim.height()); + } + + if (!ok) + { + DeleteBuffers(XVideoVDPAU, false); + return ok; + } + + return true; +#else + return false; +#endif +} + bool VideoOutputXv::CreateXvMCBuffers(void) { #ifdef USING_XVMC @@ -2172,7 +2379,9 @@ { bool ok = false; - if (subtype > XVideo && xv_port >= 0) + if (subtype == XVideoVDPAU) + ok = CreateVDPAUBuffers(); + else if (subtype > XVideo && xv_port >= 0) ok = CreateXvMCBuffers(); else if (subtype == XVideo && xv_port >= 0) { @@ -2262,6 +2471,17 @@ (void) subtype; DiscardFrames(true); +#ifdef USING_VDPAU + if (vdpau) + { + vdpau->FreeBuffers(); + vdpau->Deinit(); + delete vdpau; + vdpau = NULL; + vdpau_use_osd = false; + } 
+#endif + #ifdef USING_XVMC // XvMC buffers for (uint i=0; iMakeCurrent(true); if (gl_videochain) { @@ -2319,16 +2540,17 @@ delete gl_osdchain; gl_osdchain = NULL; } -#ifdef USING_OPENGL + if (gl_context) + { gl_context->Hide(); -#endif + gl_context->MakeCurrent(false); + } + gl_use_osd_opengl2 = false; gl_pip_ready = false; gl_osd_ready = false; allowpreviewepg = true; - - gl_context_lock.unlock(); // end OpenGL stuff vbuffers.DeleteBuffers(); @@ -2418,7 +2640,12 @@ bool displaying = false; if (!frame) return; - +#ifdef USING_VDPAU + if (vdpau && VideoOutputSubType() == XVideoVDPAU) + { + displaying = vdpau->IsBeingUsed(frame); + } +#endif // USING_VDPAU #ifdef USING_XVMC vbuffers.LockFrame(frame, "DiscardFrame -- XvMC display check"); if (frame && VideoOutputSubType() >= XVideoMC) @@ -2471,7 +2698,7 @@ VERBOSE(VB_PLAYBACK, LOC + "ClearAfterSeek()"); DiscardFrames(false); #ifdef USING_XVMC - if (VideoOutputSubType() > XVideo) + if (VideoOutputSubType() >= XVideoMC) { for (uint i=0; iClearReferenceFrames(); + } +#endif // USING_VDPAU vbuffers.DiscardFrames(next_frame_keyframe); VERBOSE(VB_PLAYBACK, LOC + QString("DiscardFrames() 3: %1 -- done()") .arg(vbuffers.GetStatus())); @@ -2586,6 +2820,19 @@ vbuffers.DoneDisplayingFrame(); return; } + +#ifdef USING_VDPAU + if (vdpau && VideoOutputSubType() == XVideoVDPAU) + { + if (vbuffers.size(kVideoBuffer_used)) + { + VideoFrame *frame = vbuffers.head(kVideoBuffer_used); + DiscardFrame(frame); + } + CheckFrameStates(); + return; + } +#endif // USING_VDPAU #ifdef USING_XVMC if (vbuffers.size(kVideoBuffer_used)) { @@ -2603,6 +2850,33 @@ #endif } +void VideoOutputXv::PrepareFrameVDPAU(VideoFrame *frame, FrameScanType scan) +{ + (void)frame; + (void)scan; + + bool pause = (frame == NULL); + // select the correct still frame on certain dvds + if (pause && vbuffers.size(kVideoBuffer_used)) + frame = vbuffers.head(kVideoBuffer_used); + + if (frame) + framesPlayed = frame->frameNumber + 1; + +#ifdef USING_VDPAU + if (!vdpau) + 
return; + + vdpau->PrepareVideo( + frame, video_rect, display_video_rect, + display_visible_rect.size(), scan, pause); + +#endif + + if (pause) + vbuffers.SetLastShownFrameToScratch(); +} + /** * \fn VideoOutputXv::PrepareFrameXvMC(VideoFrame*,FrameScanType) * @@ -2671,7 +2945,10 @@ { (void) t; - QMutexLocker locker(&gl_context_lock); + if (!gl_videochain || !gl_context) + return; + + OpenGLContextLocker ctx_lock(gl_context); if (!buffer) buffer = vbuffers.GetScratchFrame(); @@ -2682,7 +2959,6 @@ if (buffer->codec != FMT_YV12) return; - gl_context->MakeCurrent(true); gl_videochain->PrepareFrame(t, m_deinterlacing, framesPlayed); if (gl_pip_ready && gl_pipchain) @@ -2691,8 +2967,7 @@ if (gl_osd_ready && gl_osdchain) gl_osdchain->PrepareFrame(t, m_deinterlacing, framesPlayed); - gl_context->Flush(); - gl_context->MakeCurrent(false); + gl_context->Flush(false); if (vbuffers.GetScratchFrame() == buffer) vbuffers.SetLastShownFrameToScratch(); @@ -2811,7 +3086,9 @@ return; } - if (VideoOutputSubType() > XVideo) + if (VideoOutputSubType() == XVideoVDPAU) + PrepareFrameVDPAU(buffer, scan); + else if (VideoOutputSubType() > XVideo) PrepareFrameXvMC(buffer, scan); else if (VideoOutputSubType() == XVideo) PrepareFrameXv(buffer); @@ -2900,6 +3177,35 @@ #endif } +void VideoOutputXv::SetNextFrameDisplayTimeOffset(int delayus) +{ + (void)delayus; +#ifdef USING_VDPAU + if (!vdpau) + return; + + vdpau->SetNextFrameDisplayTimeOffset(delayus); +#endif +} + +void VideoOutputXv::ShowVDPAU(FrameScanType scan) +{ + (void)scan; +#ifdef USING_VDPAU + if (!vdpau) + return; + + if (vdpau->IsErrored()) + { + errored = true; + return; + } + + vdpau->DisplayNextFrame(); + CheckFrameStates(); +#endif +} + void VideoOutputXv::ShowXvMC(FrameScanType scan) { (void)scan; @@ -3056,22 +3362,22 @@ DrawUnusedRects(/* don't do a sync*/false); } - if (VideoOutputSubType() > XVideo) + if (VideoOutputSubType() == XVideoVDPAU) + ShowVDPAU(scan); + else if (VideoOutputSubType() > XVideo) ShowXvMC(scan); else 
if (VideoOutputSubType() == XVideo) ShowXVideo(scan); - else if (VideoOutputSubType() == OpenGL) - { - QMutexLocker locker(&gl_context_lock); + else if (VideoOutputSubType() == OpenGL && gl_context) gl_context->SwapBuffers(); - } X11S(XSync(XJ_disp, False)); } void VideoOutputXv::ShowPip(VideoFrame *frame, NuppelVideoPlayer *pipplayer) { - if (VideoOutputSubType() != OpenGL) + if (VideoOutputSubType() != OpenGL && + VideoOutputSubType() != XVideoVDPAU) { VideoOutput::ShowPip(frame, pipplayer); return; @@ -3101,14 +3407,26 @@ QRect position = GetPIPRect(db_pip_location, pipplayer); +#ifdef USING_VDPAU + if (vdpau && VideoOutputSubType() == XVideoVDPAU) + { + if (vdpau_use_pip) + vdpau_use_pip = vdpau->ShowPiP(pipimage, position); + + pipplayer->ReleaseCurrentFrame(pipimage); + return; + } +#endif // USING_VDPAU + if (!gl_pipchain) { VERBOSE(VB_PLAYBACK, LOC + "Initialise PiP."); gl_pipchain = new OpenGLVideo(); bool success = gl_pipchain->Init(gl_context, db_use_picture_controls, - true, QSize(pipVideoWidth, pipVideoHeight), + QSize(pipVideoWidth, pipVideoHeight), position, position, - QRect(0, 0, pipVideoWidth, pipVideoHeight), false); + QRect(0, 0, pipVideoWidth, pipVideoHeight), false, + GetFilters()); success &= gl_pipchain->AddDeinterlacer("openglonefield"); gl_pipchain->SetMasterViewport(gl_videochain->GetViewPort()); if (!success) @@ -3124,11 +3442,14 @@ { VERBOSE(VB_PLAYBACK, LOC + "Re-initialise PiP."); - bool success = gl_pipchain->ReInit( - gl_context, db_use_picture_controls, - true, QSize(pipVideoWidth, pipVideoHeight), - position, position, - QRect(0, 0, pipVideoWidth, pipVideoHeight), false); + delete gl_pipchain; + gl_pipchain = new OpenGLVideo(); + bool success = gl_pipchain->Init( + gl_context, db_use_picture_controls, + QSize(pipVideoWidth, pipVideoHeight), + position, position, + QRect(0, 0, pipVideoWidth, pipVideoHeight), false, + GetFilters()); gl_pipchain->SetMasterViewport(gl_videochain->GetViewPort()); if (!success) @@ -3158,6 +3479,23 @@ 
456 - 0.00001); int boboff = use_bob ? boboff_raw : 0; + if (XVideoVDPAU == VideoOutputSubType()) + { + if (needrepaint) + { + X11L; + XSetForeground(XJ_disp, XJ_gc, 0x020202); + XFillRectangle(XJ_disp, XJ_curwin, XJ_gc, + display_visible_rect.left(), + display_visible_rect.top(), + display_visible_rect.width(), + display_visible_rect.height()); + X11U; + needrepaint = false; + } + return; + } + xv_need_bobdeint_repaint |= needrepaint; if (chroma_osd && chroma_osd->GetImage() && xv_need_bobdeint_repaint) @@ -3279,6 +3617,16 @@ if (VideoOutputSubType() <= XVideo) return; +#ifdef USING_VDPAU + if (VideoOutputSubType() == XVideoVDPAU) + { + if (!vdpau) + return; + vdpau->Decode(frame); + return; + } +#endif + #ifdef USING_XVMC xvmc_render_state_t *render = GetRender(frame); // disable questionable ffmpeg surface munging @@ -3406,6 +3754,12 @@ } vbuffers.UnlockFrame(&av_pause_frame, "UpdatePauseFrame - used"); } +#ifdef USING_VDPAU + else if (VideoOutputSubType() == XVideoVDPAU) + { + return; + } +#endif #ifdef USING_XVMC else { @@ -3459,6 +3813,14 @@ #endif } +void VideoOutputXv::ProcessFrameVDPAU(VideoFrame *frame, OSD *osd, + NuppelVideoPlayer *pipPlayer) +{ + if (vdpau_use_osd && osd) + DisplayOSD(frame, osd); + ShowPip(frame, pipPlayer); +} + void VideoOutputXv::ProcessFrameXvMC(VideoFrame *frame, OSD *osd) { (void)frame; @@ -3667,7 +4029,10 @@ (void) filterList; (void) pipPlayer; - QMutexLocker locker(&gl_context_lock); + if (!gl_videochain || !gl_context) + return; + + OpenGLContextLocker ctx_lock(gl_context); bool pauseframe = false; if (!frame) @@ -3677,9 +4042,6 @@ pauseframe = true; } - // disable image processing for offscreen rendering - gl_context->MakeCurrent(true); - if (filterList) filterList->ProcessFrame(frame); @@ -3700,10 +4062,10 @@ m_deintFilter->ProcessFrame(frame); } - if (gl_videochain) - gl_videochain->UpdateInputFrame(frame); + bool soft_bob = m_deinterlacing && (m_deintfiltername == "bobdeint"); - gl_context->MakeCurrent(false); + if 
(gl_videochain) + gl_videochain->UpdateInputFrame(frame, soft_bob); } void VideoOutputXv::ProcessFrameMem(VideoFrame *frame, OSD *osd, @@ -3769,7 +4131,9 @@ return; } - if (VideoOutputSubType() == OpenGL) + if (VideoOutputSubType() == XVideoVDPAU) + ProcessFrameVDPAU(frame, osd, pipPlayer); + else if (VideoOutputSubType() == OpenGL) ProcessFrameOpenGL(frame, osd, filterList, pipPlayer); else if (VideoOutputSubType() <= XVideo) ProcessFrameMem(frame, osd, filterList, pipPlayer); @@ -3784,10 +4148,24 @@ if (!supported_attributes) return -1; - if (VideoOutputSubType() == OpenGL) + if (VideoOutputSubType() == XVideoVDPAU) + { +#ifdef USING_VDPAU + if (vdpau) + { + newValue = min(max(newValue, 0), 100); + newValue = vdpau->SetPictureAttribute(attribute, newValue); + if (newValue >= 0) + SetPictureAttributeDBValue(attribute, newValue); + return newValue; + } +#endif // USING_VDPAU + } + + if (VideoOutputSubType() == OpenGL && gl_context) { newValue = min(max(newValue, 0), 100); - newValue = gl_videochain->SetPictureAttribute(attribute, newValue); + newValue = gl_context->SetPictureAttribute(attribute, newValue); if (newValue >= 0) SetPictureAttributeDBValue(attribute, newValue); return newValue; @@ -3846,9 +4224,16 @@ { supported_attributes = kPictureAttributeSupported_None; - if (VideoOutputSubType() == OpenGL) + if (VideoOutputSubType() == XVideoVDPAU) { - supported_attributes = gl_videochain->GetSupportedPictureAttributes(); +#ifdef USING_VDPAU + if (vdpau) + supported_attributes = vdpau->GetSupportedPictureAttributes(); +#endif //USING_VDPAU + } + else if (VideoOutputSubType() == OpenGL && gl_context) + { + supported_attributes = gl_context->GetSupportedPictureAttributes(); } else if (VideoOutputSubType() >= XVideo) { @@ -3882,6 +4267,38 @@ void VideoOutputXv::CheckFrameStates(void) { +#ifdef USING_VDPAU + if (vdpau && VideoOutputSubType() == XVideoVDPAU) + { + frame_queue_t::iterator it; + it = vbuffers.begin_lock(kVideoBuffer_displayed); + while (it != 
vbuffers.end(kVideoBuffer_displayed)) + { + VideoFrame* frame = *it; + if (!vdpau->IsBeingUsed(frame)) + { + if (vbuffers.contains(kVideoBuffer_decode, frame)) + { + VERBOSE(VB_PLAYBACK, LOC + QString( + "Frame %1 is in use by avlib and so is " + "being held for later discarding.") + .arg(DebugString(frame, true))); + } + else + { + vbuffers.RemoveInheritence(frame); + vbuffers.safeEnqueue(kVideoBuffer_avail, frame); + vbuffers.end_lock(); + it = vbuffers.begin_lock(kVideoBuffer_displayed); + continue; + } + } + ++it; + } + vbuffers.end_lock(); + return; + } +#endif // USING_VDPAU #ifdef USING_XVMC frame_queue_t::iterator it; @@ -3977,6 +4394,9 @@ bool VideoOutputXv::IsDisplaying(VideoFrame* frame) { (void)frame; + if (!frame) + return false; + #ifdef USING_XVMC xvmc_render_state_t *render = GetRender(frame); if (render) @@ -4152,40 +4572,64 @@ int VideoOutputXv::DisplayOSD(VideoFrame *frame, OSD *osd, int stride, int revision) { - if (!gl_use_osd_opengl2) + if (!gl_use_osd_opengl2 && !vdpau_use_osd) return VideoOutput::DisplayOSD(frame, osd, stride, revision); gl_osd_ready = false; - if (!osd || !gl_osdchain) + if (!osd) return -1; - if (vsz_enabled && gl_videochain) - gl_videochain->SetVideoResize(vsz_desired_display_rect); - OSDSurface *surface = osd->Display(); if (!surface) + { +#ifdef USING_VDPAU + if (vdpau) + vdpau->DisableOSD(); +#endif return -1; - - gl_osd_ready = true; + } bool changed = (-1 == revision) ? 
surface->Changed() : (surface->GetRevision()!=revision); + if (gl_use_osd_opengl2) + gl_osd_ready = true; + if (changed) { QSize visible = GetTotalOSDBounds().size(); - int offsets[3] = + if (vdpau_use_osd) { - surface->y - surface->yuvbuffer, - surface->u - surface->yuvbuffer, - surface->v - surface->yuvbuffer, - }; - gl_osdchain->UpdateInput(surface->yuvbuffer, offsets, - 0, FMT_YV12, visible); - gl_osdchain->UpdateInput(surface->alpha, offsets, - 3, FMT_ALPHA, visible); +#ifdef USING_VDPAU + if (!vdpau) + return -1; + + void *offsets[3], *alpha[1]; + offsets[0] = surface->y; + offsets[1] = surface->u; + offsets[2] = surface->v; + alpha[0] = surface->alpha; + vdpau->UpdateOSD(offsets, visible, alpha); +#endif // USING_VDPAU + } + else if (gl_use_osd_opengl2) + { + if (!gl_osdchain) + return -1; + + if (vsz_enabled && gl_videochain) + gl_videochain->SetVideoResize(vsz_desired_display_rect); + + int offsets[3]; + offsets[0] = surface->y - surface->yuvbuffer; + offsets[1] = surface->u - surface->yuvbuffer; + offsets[2] = surface->v - surface->yuvbuffer; + + gl_osdchain->UpdateInput(surface->yuvbuffer, offsets, + FMT_YV12, visible, surface->alpha); + } } return changed; } @@ -4202,7 +4646,12 @@ if (!disp) return list; - list = allowed_video_renderers(myth_codec_id, disp); + X11L; + int screen = DefaultScreen(disp); + Window window = DefaultRootWindow(disp); + X11U; + + list = allowed_video_renderers(myth_codec_id, disp, screen, window); XCloseDisplay(disp); @@ -4210,7 +4659,8 @@ } static void SetFromEnv(bool &useXvVLD, bool &useXvIDCT, bool &useXvMC, - bool &useXVideo, bool &useShm, bool &useOpenGL) + bool &useXVideo, bool &useShm, bool &useOpenGL, + bool &useVDPAU) { // can be used to force non-Xv mode as well as non-Xv/non-Shm mode if (getenv("NO_XVMC_VLD")) @@ -4225,13 +4675,21 @@ useXVideo = useShm = false; if (getenv("NO_OPENGL")) useOpenGL = false; + if (getenv("NO_VDPAU")) + useVDPAU = false; } static void SetFromHW(Display *d, - bool &useXvMC, bool 
&useXVideo, - bool &useShm, bool &useXvMCOpenGL, - bool &useOpenGL) -{ + int screen, Window curwin, + bool &useXvMC, bool &useXVideo, + bool &useShm, bool &useXvMCOpenGL, + bool &useOpenGL, bool &useVDPAU, + MythCodecID vdpau_codec_id) +{ + (void)screen; + (void)d; + (void)curwin; + (void) vdpau_codec_id; // find out about XvMC support if (useXvMC) { @@ -4294,21 +4752,52 @@ useOpenGL = OpenGLContext::IsGLXSupported(d, 1, 2); #endif // USING_OPENGL_VIDEO } + + if (useVDPAU) + { + useVDPAU = false; +#ifdef USING_VDPAU + if (vdpau_codec_id < kCodec_NORMAL_END) + { + useVDPAU = true; + } + else + { + useVDPAU = VDPAUContext::CheckCodecSupported(vdpau_codec_id); + } + if (useVDPAU) + { + // N.B. This only confirms another VDPAU context can be created. + // Creating a second hardware decoder will still fail (180.25) + // e.g. when attempting to use PBP. + VDPAUContext *c = new VDPAUContext(); + useVDPAU = c->Init(d, screen, curwin, QSize(1920,1200), + false, vdpau_codec_id); + c->Deinit(); + delete c; + } + +#endif // USING_VDPAU + } } -static QStringList allowed_video_renderers(MythCodecID myth_codec_id, - Display *XJ_disp) +static QStringList allowed_video_renderers( + MythCodecID myth_codec_id, Display *display, int screen, Window curwin) { - bool vld, idct, mc, xv, shm, xvmc_opengl, opengl; + bool vld, idct, mc, xv, shm, xvmc_opengl, opengl, vdpau; - myth2av_codecid(myth_codec_id, vld, idct, mc); + myth2av_codecid(myth_codec_id, vld, idct, mc, vdpau); + + // allow vdpau rendering for software decode + if (myth_codec_id < kCodec_NORMAL_END) + vdpau = true; opengl = xv = shm = !vld && !idct; xvmc_opengl = vld || idct || mc; - SetFromEnv(vld, idct, mc, xv, shm, opengl); - SetFromHW(XJ_disp, mc, xv, shm, xvmc_opengl, opengl); - + SetFromEnv(vld, idct, mc, xv, shm, opengl, vdpau); + SetFromHW(display, screen, curwin, mc, xv, shm, xvmc_opengl, + opengl, vdpau, myth_codec_id); idct &= mc; QStringList list; @@ -4320,8 +4809,15 @@ list += "xv-blit"; if (shm) list += "xshm"; + 
if (vdpau) + list += "vdpau"; list += "xlib"; } + else if ((kCodec_VDPAU_BEGIN < myth_codec_id) && + (myth_codec_id < kCodec_VDPAU_END) && vdpau) + { + list += "vdpau"; + } else { if (vld || idct || mc) diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/videoout_xv.h mythtv/libs/libmythtv/videoout_xv.h --- mythtv.ori/libs/libmythtv/videoout_xv.h 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libmythtv/videoout_xv.h 2009-02-10 14:01:34.000000000 +1100 @@ -47,9 +47,11 @@ typedef enum VideoOutputSubType { - XVUnknown = 0, Xlib, XShm, OpenGL, XVideo, XVideoMC, XVideoIDCT, XVideoVLD, + XVUnknown = 0, Xlib, XShm, OpenGL, XVideo, XVideoVDPAU, + XVideoMC, XVideoIDCT, XVideoVLD, } VOSType; +class VDPAUContext; class VideoOutputXv : public VideoOutput { friend class ChromaKeyOSD; @@ -127,8 +129,10 @@ static QStringList GetAllowedRenderers(MythCodecID myth_codec_id, const QSize &video_dim); - private: VOSType VideoOutputSubType() const { return video_output_subtype; } + void SetNextFrameDisplayTimeOffset(int delayus); + + private: virtual QRect GetVisibleOSDBounds(float&, float&, float) const; virtual QRect GetTotalOSDBounds(void) const; @@ -137,6 +141,8 @@ void DiscardFrames(bool next_frame_keyframe); void DoneDisplayingFrame(void); + void ProcessFrameVDPAU(VideoFrame *frame, OSD *osd, + NuppelVideoPlayer *pipPlayer); void ProcessFrameXvMC(VideoFrame *frame, OSD *osd); void ProcessFrameOpenGL(VideoFrame *frame, OSD *osd, FilterChain *filterList, @@ -145,11 +151,13 @@ FilterChain *filterList, NuppelVideoPlayer *pipPlayer); + void PrepareFrameVDPAU(VideoFrame *, FrameScanType); void PrepareFrameXvMC(VideoFrame *, FrameScanType); void PrepareFrameXv(VideoFrame *); void PrepareFrameOpenGL(VideoFrame *, FrameScanType); void PrepareFrameMem(VideoFrame *, FrameScanType); + void ShowVDPAU(FrameScanType scan); void ShowXvMC(FrameScanType scan); void ShowXVideo(FrameScanType scan); @@ -162,16 +170,19 @@ void InitColorKey(bool turnoffautopaint); bool InitVideoBuffers(MythCodecID, 
bool use_xv, - bool use_shm, bool use_opengl); + bool use_shm, bool use_opengl, + bool use_vdpau); bool InitXvMC(MythCodecID); bool InitXVideo(void); bool InitOpenGL(void); bool InitXShm(void); bool InitXlib(void); + bool InitVDPAU(MythCodecID); bool InitOSD(const QString&); bool CheckOSDInit(void); + bool CreateVDPAUBuffers(void); bool CreateXvMCBuffers(void); bool CreateBuffers(VOSType subtype); vector CreateXvMCSurfaces(uint num, bool surface_has_vld); @@ -199,6 +210,10 @@ bool SetupDeinterlaceOpenGL( bool interlaced, const QString &overridefilter); + // VDPAU specific helper functions + bool SetDeinterlacingEnabledVDPAU(bool enable); + bool SetupDeinterlaceVDPAU( + bool interlaced, const QString &overridefilter); // Misc. MythCodecID myth_codec_id; @@ -251,6 +266,13 @@ // Support for nVidia XvMC copy to texture feature XvMCTextures *xvmc_tex; +#ifdef USING_VDPAU + VDPAUContext *vdpau; +#endif + bool vdpau_use_osd; + bool vdpau_use_pip; + bool vdpau_use_colorkey; + // Basic Xv drawing info int xv_port; int xv_hue_base; diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/videooutbase.cpp mythtv/libs/libmythtv/videooutbase.cpp --- mythtv.ori/libs/libmythtv/videooutbase.cpp 2009-01-22 17:04:46.000000000 +1100 +++ mythtv/libs/libmythtv/videooutbase.cpp 2009-02-10 14:01:34.000000000 +1100 @@ -110,7 +110,7 @@ to_comma_list(renderers)); QString renderer = QString::null; - if (renderers.size() > 1) + if (renderers.size() > 0) { VideoDisplayProfile vprof; vprof.SetInput(video_dim); @@ -562,7 +562,8 @@ // Default to not supporting bob deinterlace return (!filtername.contains("bobdeint") && !filtername.contains("doublerate") && - !filtername.contains("opengl")); + !filtername.contains("opengl") && + !filtername.contains("vdpau")); } /** diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/vsync.cpp mythtv/libs/libmythtv/vsync.cpp --- mythtv.ori/libs/libmythtv/vsync.cpp 2009-01-22 12:22:02.000000000 +1100 +++ mythtv/libs/libmythtv/vsync.cpp 2009-02-10 14:01:34.000000000 
+1100 @@ -89,7 +89,10 @@ skip = m_forceskip; m_forceskip = 0; } - + +#ifdef USING_VDPAU +// TESTVIDEOSYNC(VDPAUVideoSync); +#endif #ifndef _WIN32 TESTVIDEOSYNC(nVidiaVideoSync); TESTVIDEOSYNC(DRMVideoSync); @@ -205,6 +208,7 @@ m_nexttrigger.tv_usec = now.tv_usec; OffsetTimeval(m_nexttrigger, ret_val); } + return ret_val; } @@ -757,7 +761,7 @@ unsigned long rtcdata; while (m_delay > 0) { - read(m_rtcfd, &rtcdata, sizeof(rtcdata)); + (void)read(m_rtcfd, &rtcdata, sizeof(rtcdata)); m_delay = CalcDelay(); } } @@ -768,6 +772,49 @@ } #endif /* __linux__ */ +#ifdef USING_VDPAU +VDPAUVideoSync::VDPAUVideoSync(VideoOutput *vo, + int fr, int ri, bool intl) : + VideoSync(vo, fr, ri, intl) +{ +} + +VDPAUVideoSync::~VDPAUVideoSync() +{ +} + +bool VDPAUVideoSync::TryInit(void) +{ + VideoOutputXv *vo = dynamic_cast(m_video_output); + if (!vo) + return false; + + if (vo->VideoOutputSubType() != XVideoVDPAU) + return false; + + return true; +} + +void VDPAUVideoSync::WaitForFrame(int sync_delay) +{ + // Offset for externally-provided A/V sync delay + OffsetTimeval(m_nexttrigger, sync_delay); + m_delay = CalcDelay(); + + if (m_delay < 0) + m_delay = 0; + + VideoOutputXv *vo = (VideoOutputXv *)(m_video_output); + vo->SetNextFrameDisplayTimeOffset(m_delay); +} + +void VDPAUVideoSync::AdvanceTrigger(void) +{ + UpdateNexttrigger(); +} + +#endif + BusyWaitVideoSync::BusyWaitVideoSync(VideoOutput *vo, int fr, int ri, bool intl) : VideoSync(vo, fr, ri, intl) diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/vsync.h mythtv/libs/libmythtv/vsync.h --- mythtv.ori/libs/libmythtv/vsync.h 2009-01-22 12:22:03.000000000 +1100 +++ mythtv/libs/libmythtv/vsync.h 2009-02-10 14:01:34.000000000 +1100 @@ -252,6 +252,27 @@ }; #endif +#ifdef USING_VDPAU +/** \brief Video synchronization class employing VDPAU + */ +class VDPAUVideoSync : public VideoSync +{ + public: + VDPAUVideoSync(VideoOutput*, + int frame_interval, int refresh_interval, + bool interlaced); + ~VDPAUVideoSync(); + + QString getName(void) 
const { return QString("VDPAU"); } + bool TryInit(void); + void WaitForFrame(int sync_delay); + void AdvanceTrigger(void); + + private: +}; + +#endif + /** \brief Video synchronization classes employing usleep() and busy-waits. * * Non-phase-maintaining. There may occasionally be short periods diff -Naur --exclude=.svn mythtv.ori/libs/libmythtv/xvmctextures.cpp mythtv/libs/libmythtv/xvmctextures.cpp --- mythtv.ori/libs/libmythtv/xvmctextures.cpp 2009-01-23 16:38:27.000000000 +1100 +++ mythtv/libs/libmythtv/xvmctextures.cpp 2009-02-10 14:00:22.000000000 +1100 @@ -88,7 +88,7 @@ XVisualInfo *vis_info; vis_info = glXGetVisualFromFBConfig(XJ_disp, glx_fbconfig); gl_window = get_gl_window(XJ_disp, XJ_curwin, vis_info, - window_size, true); + QRect(QPoint(0,0), window_size)); glx_window = get_glx_window(XJ_disp, glx_fbconfig, gl_window, glx_context, glx_pbuffer, window_size);