* Rewrote the YUY2->RGB converter to avoid license problems.

So now, emotion_xine supports yuy2-encoded movies (such as .wmv).


SVN revision: 15678
This commit is contained in:
moom16 2005-07-08 23:08:48 +00:00 committed by moom16
parent 8e204f7f56
commit ee5135f71f
5 changed files with 165 additions and 32 deletions

View File

@ -15,8 +15,16 @@
#define META_TRACK_YEAR 6 #define META_TRACK_YEAR 6
#define META_TRACK_DISCID 7 #define META_TRACK_DISCID 7
typedef enum _Emotion_Format Emotion_Format;
typedef struct _Emotion_Video_Module Emotion_Video_Module; typedef struct _Emotion_Video_Module Emotion_Video_Module;
enum _Emotion_Format
{
EMOTION_YV12,
EMOTION_YUY2, /* unused for now since evas does not support yuy2 format */
EMOTION_BGRA
};
struct _Emotion_Video_Module struct _Emotion_Video_Module
{ {
unsigned char (*init) (Evas_Object *obj, void **video); unsigned char (*init) (Evas_Object *obj, void **video);
@ -35,8 +43,10 @@ struct _Emotion_Video_Module
int (*audio_handled) (void *ef); int (*audio_handled) (void *ef);
int (*seekable) (void *ef); int (*seekable) (void *ef);
void (*frame_done) (void *ef); void (*frame_done) (void *ef);
void (*yuv_size_get) (void *ef, int *w, int *h); Emotion_Format (*format_get) (void *ef);
void (*video_data_size_get) (void *ef, int *w, int *h);
int (*yuv_rows_get) (void *ef, int w, int h, unsigned char **yrows, unsigned char **urows, unsigned char **vrows); int (*yuv_rows_get) (void *ef, int w, int h, unsigned char **yrows, unsigned char **urows, unsigned char **vrows);
int (*bgra_data_get) (void *ef, unsigned char **bgra_data);
void (*event_feed) (void *ef, int event); void (*event_feed) (void *ef, int event);
void (*event_mouse_button_feed) (void *ef, int button, int x, int y); void (*event_mouse_button_feed) (void *ef, int button, int x, int y);
void (*event_mouse_move_feed) (void *ef, int x, int y); void (*event_mouse_move_feed) (void *ef, int x, int y);

View File

@ -982,42 +982,55 @@ _pos_set_job(void *data)
static void static void
_pixels_get(void *data, Evas_Object *obj) _pixels_get(void *data, Evas_Object *obj)
{ {
Evas_Pixel_Import_Source ps;
int iw, ih, w, h; int iw, ih, w, h;
int i;
unsigned char **rows;
Smart_Data *sd; Smart_Data *sd;
Emotion_Format format;
sd = data; sd = data;
evas_object_image_size_get(obj, &iw, &ih); evas_object_image_size_get(obj, &iw, &ih);
sd->module->yuv_size_get(sd->video, &w, &h); sd->module->video_data_size_get(sd->video, &w, &h);
if ((w != iw) || (h != ih)) if ((w != iw) || (h != ih))
{ {
evas_object_image_size_set(obj, w, h); evas_object_image_size_set(obj, w, h);
iw = w; iw = w;
ih = h; ih = h;
} }
ps.format = EVAS_PIXEL_FORMAT_YUV420P_601; format = sd->module->format_get(sd->video);
ps.w = iw; if (format == EMOTION_YV12)
ps.h = ih;
ps.rows = malloc(ps.h * 2 * sizeof(void *));
if (!ps.rows)
{ {
sd->module->frame_done(sd->video); unsigned char **rows;
return; Evas_Pixel_Import_Source ps;
ps.format = EVAS_PIXEL_FORMAT_YUV420P_601;
ps.w = iw;
ps.h = ih;
ps.rows = malloc(ps.h * 2 * sizeof(void *));
if (!ps.rows)
{
sd->module->frame_done(sd->video);
return;
}
rows = (unsigned char **)ps.rows;
if (sd->module->yuv_rows_get(sd->video, iw, ih,
rows,
&rows[ps.h],
&rows[ps.h + (ps.h / 2)]))
evas_object_image_pixels_import(obj, &ps);
evas_object_image_pixels_dirty_set(obj, 0);
free(ps.rows);
}
else if (format == EMOTION_BGRA)
{
unsigned char *bgra_data;
if (sd->module->bgra_data_get(sd->video, &bgra_data));
{
evas_object_image_data_set(obj, bgra_data);
}
} }
rows = (unsigned char **)ps.rows;
if (sd->module->yuv_rows_get(sd->video, iw, ih,
rows,
&rows[ps.h],
&rows[ps.h + (ps.h / 2)]))
evas_object_image_pixels_import(obj, &ps);
evas_object_image_pixels_dirty_set(obj, 0);
free(ps.rows);
sd->module->frame_done(sd->video); sd->module->frame_done(sd->video);
} }

View File

@ -16,8 +16,10 @@ static double em_pos_get(void *ef);
static double em_ratio_get(void *ef); static double em_ratio_get(void *ef);
static int em_seekable(void *ef); static int em_seekable(void *ef);
static void em_frame_done(void *ef); static void em_frame_done(void *ef);
static void em_yuv_size_get(void *ef, int *w, int *h); static Emotion_Format em_format_get(void *ef);
static void em_video_data_size_get(void *ef, int *w, int *h);
static int em_yuv_rows_get(void *ef, int w, int h, unsigned char **yrows, unsigned char **urows, unsigned char **vrows); static int em_yuv_rows_get(void *ef, int w, int h, unsigned char **yrows, unsigned char **urows, unsigned char **vrows);
static int em_bgra_data_get(void *ef, unsigned char **bgra_data);
static void em_event_feed(void *ef, int event); static void em_event_feed(void *ef, int event);
static void em_event_mouse_button_feed(void *ef, int button, int x, int y); static void em_event_mouse_button_feed(void *ef, int button, int x, int y);
static void em_event_mouse_move_feed(void *ef, int x, int y); static void em_event_mouse_move_feed(void *ef, int x, int y);
@ -544,8 +546,21 @@ em_frame_done(void *ef)
} }
} }
static Emotion_Format em_format_get(void *ef)
{
Emotion_Xine_Video *ev;
Emotion_Xine_Video_Frame *fr;
ev = (Emotion_Xine_Video *)ef;
fr = ev->cur_frame;
if (fr)
return fr->format;
return EMOTION_YV12;
}
static void static void
em_yuv_size_get(void *ef, int *w, int *h) em_video_data_size_get(void *ef, int *w, int *h)
{ {
Emotion_Xine_Video *ev; Emotion_Xine_Video *ev;
Emotion_Xine_Video_Frame *fr; Emotion_Xine_Video_Frame *fr;
@ -583,6 +598,23 @@ em_yuv_rows_get(void *ef, int w, int h, unsigned char **yrows, unsigned char **u
return 0; return 0;
} }
static int
em_bgra_data_get(void *ef, unsigned char **bgra_data)
{
Emotion_Xine_Video *ev;
Emotion_Xine_Video_Frame *fr;
ev = (Emotion_Xine_Video *)ef;
fr = ev->cur_frame;
if (!fr) return 0;
if (fr->bgra_data)
{
*bgra_data = fr->bgra_data;
return 1;
}
return 0;
}
static void static void
em_event_feed(void *ef, int event) em_event_feed(void *ef, int event)
{ {
@ -1337,8 +1369,10 @@ static Emotion_Video_Module em_module =
em_audio_handled, /* audio_handled */ em_audio_handled, /* audio_handled */
em_seekable, /* seekable */ em_seekable, /* seekable */
em_frame_done, /* frame_done */ em_frame_done, /* frame_done */
em_yuv_size_get, /* yuv_size_get */ em_format_get, /* format_get */
em_video_data_size_get, /* video_data_size_get */
em_yuv_rows_get, /* yuv_rows_get */ em_yuv_rows_get, /* yuv_rows_get */
em_bgra_data_get, /* bgra_data_get */
em_event_feed, /* event_feed */ em_event_feed, /* event_feed */
em_event_mouse_button_feed, /* event_mouse_button_feed */ em_event_mouse_button_feed, /* event_mouse_button_feed */
em_event_mouse_move_feed, /* event_mouse_move_feed */ em_event_mouse_move_feed, /* event_mouse_move_feed */

View File

@ -57,7 +57,9 @@ struct _Emotion_Xine_Video_Frame
{ {
int w, h; int w, h;
double ratio; double ratio;
Emotion_Format format;
unsigned char *y, *u, *v; unsigned char *y, *u, *v;
unsigned char *bgra_data;
int y_stride, u_stride, v_stride; int y_stride, u_stride, v_stride;
Evas_Object *obj; Evas_Object *obj;
double timestamp; double timestamp;

View File

@ -94,6 +94,8 @@ static void _emotion_overlay_blend (vo_driver_t *vo_driver, vo_f
static void _emotion_overlay_mem_blend_8 (uint8_t *mem, uint8_t val, uint8_t o, size_t sz); static void _emotion_overlay_mem_blend_8 (uint8_t *mem, uint8_t val, uint8_t o, size_t sz);
static void _emotion_overlay_blend_yuv (uint8_t *dst_base[3], vo_overlay_t * img_overl, int dst_width, int dst_height, int dst_pitches[3]); static void _emotion_overlay_blend_yuv (uint8_t *dst_base[3], vo_overlay_t * img_overl, int dst_width, int dst_height, int dst_pitches[3]);
static void _emotion_yuy2_to_bgra32 (int width, int height, unsigned char *src, unsigned char *dst);
/***************************************************************************/ /***************************************************************************/
static vo_info_t _emotion_info = static vo_info_t _emotion_info =
{ {
@ -215,7 +217,7 @@ _emotion_capabilities_get(vo_driver_t *vo_driver)
dv = (Emotion_Driver *)vo_driver; dv = (Emotion_Driver *)vo_driver;
// printf("emotion: _emotion_capabilities_get()\n"); // printf("emotion: _emotion_capabilities_get()\n");
return VO_CAP_YV12; return VO_CAP_YV12 | VO_CAP_YUY2;
} }
/***************************************************************************/ /***************************************************************************/
@ -358,6 +360,7 @@ _emotion_frame_format_update(vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint3
{ {
int y_size, uv_size; int y_size, uv_size;
fr->frame.format = EMOTION_YV12;
fr->vo_frame.pitches[0] = 8 * ((width + 7) / 8); fr->vo_frame.pitches[0] = 8 * ((width + 7) / 8);
fr->vo_frame.pitches[1] = 8 * ((width + 15) / 16); fr->vo_frame.pitches[1] = 8 * ((width + 15) / 16);
fr->vo_frame.pitches[2] = 8 * ((width + 15) / 16); fr->vo_frame.pitches[2] = 8 * ((width + 15) / 16);
@ -374,19 +377,49 @@ _emotion_frame_format_update(vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint3
fr->frame.y = fr->vo_frame.base[0]; fr->frame.y = fr->vo_frame.base[0];
fr->frame.u = fr->vo_frame.base[1]; fr->frame.u = fr->vo_frame.base[1];
fr->frame.v = fr->vo_frame.base[2]; fr->frame.v = fr->vo_frame.base[2];
fr->frame.bgra_data = NULL;
fr->frame.y_stride = fr->vo_frame.pitches[0]; fr->frame.y_stride = fr->vo_frame.pitches[0];
fr->frame.u_stride = fr->vo_frame.pitches[1]; fr->frame.u_stride = fr->vo_frame.pitches[1];
fr->frame.v_stride = fr->vo_frame.pitches[2]; fr->frame.v_stride = fr->vo_frame.pitches[2];
fr->frame.obj = dv->ev->obj; fr->frame.obj = dv->ev->obj;
} }
break; break;
case XINE_IMGFMT_YUY2:
{
int y_size, uv_size;
fr->frame.format = EMOTION_BGRA;
fr->vo_frame.pitches[0] = 8 * ((width + 3) / 4);
fr->vo_frame.pitches[1] = 0;
fr->vo_frame.pitches[2] = 0;
fr->vo_frame.base[0] = malloc(fr->vo_frame.pitches[0] * height);
fr->vo_frame.base[1] = NULL;
fr->vo_frame.base[2] = NULL;
fr->frame.w = fr->width;
fr->frame.h = fr->height;
fr->frame.ratio = fr->vo_frame.ratio;
fr->frame.y = NULL;
fr->frame.u = NULL;
fr->frame.v = NULL;
fr->frame.bgra_data = malloc(fr->width * fr->height * 4);
fr->frame.y_stride = 0;
fr->frame.u_stride = 0;
fr->frame.v_stride = 0;
fr->frame.obj = dv->ev->obj;
}
break;
default: default:
break; break;
} }
if (((format == XINE_IMGFMT_YV12) if (((format == XINE_IMGFMT_YV12)
&& ((fr->vo_frame.base[0] == NULL) && ((fr->vo_frame.base[0] == NULL)
|| (fr->vo_frame.base[1] == NULL) || (fr->vo_frame.base[1] == NULL)
|| (fr->vo_frame.base[2] == NULL)))) || (fr->vo_frame.base[2] == NULL)))
|| ((format == XINE_IMGFMT_YUY2)
&& ((fr->vo_frame.base[0] == NULL)
|| (fr->frame.bgra_data == NULL))))
{ {
_emotion_frame_data_free(fr); _emotion_frame_data_free(fr);
} }
@ -408,6 +441,11 @@ _emotion_frame_display(vo_driver_t *vo_driver, vo_frame_t *vo_frame)
{ {
void *buf; void *buf;
int ret; int ret;
if (fr->format == XINE_IMGFMT_YUY2)
{
_emotion_yuy2_to_bgra32(fr->width, fr->height, fr->vo_frame.base[0], fr->frame.bgra_data);
}
buf = &(fr->frame); buf = &(fr->frame);
fr->frame.timestamp = (double)fr->vo_frame.vpts / 90000.0; fr->frame.timestamp = (double)fr->vo_frame.vpts / 90000.0;
@ -444,6 +482,11 @@ _emotion_frame_data_free(Emotion_Frame *fr)
fr->frame.u = fr->vo_frame.base[1]; fr->frame.u = fr->vo_frame.base[1];
fr->frame.v = fr->vo_frame.base[2]; fr->frame.v = fr->vo_frame.base[2];
} }
if (fr->frame.bgra_data)
{
free(fr->frame.bgra_data);
fr->frame.bgra_data = NULL;
}
} }
static void static void
@ -672,3 +715,34 @@ static void _emotion_overlay_blend_yuv(uint8_t *dst_base[3], vo_overlay_t * img_
} }
} }
} }
//TODO: Really need to improve this converter!
#define LIMIT(x) ((x) > 0xff ? 0xff : ((x) < 0 ? 0 : (x)))
static void
_emotion_yuy2_to_bgra32(int width, int height, unsigned char *src, unsigned char *dst)
{
int i, j;
unsigned char *y, *u, *v;
y = src;
u = src + 1;
v = src + 3;
for (i = 0; i < width; i++)
{
for (j = 0; j < height; j++)
{
*dst++ = LIMIT(1.164 * (*y - 16) + 2.018 * (*u - 128));
*dst++ = LIMIT(1.164 * (*y - 16) - 0.813 * (*v - 128) - 0.391 * (*u - 128));
*dst++ = LIMIT(1.164 * (*y - 16) + 1.596 * (*v - 128));
*dst++ = 0;
y += 2;
if (j % 2 == 1)
{
u += 4;
v += 4;
}
}
}
}