path: root/legacy/emotion/src/modules
author     moom16 <moom16>  2005-07-08 19:56:48 +0000
committer  moom16 <moom16@7cbeb6ba-43b4-40fd-8cce-4c39aea84d33>  2005-07-08 19:56:48 +0000
commit     8e4e372d878c2a964d9b3ef123c7aa34f0691915 (patch)
tree       93f0724f2865b2f5478fd384b8e25063603dd4f6 /legacy/emotion/src/modules
parent     0bc77892a096d12a14a5bdda233be7c310fe7670 (diff)
* Add support for YUY2 to the emotion_xine module. So now we can play WMV
movies!

I had to make some module API changes:
- rename yuv_size_get to video_data_size_get
- add the function "Emotion_Format (*format_get) (void *ef);" to
  Emotion_Video_Module. This function should return the format used by the
  video (EMOTION_YV12, EMOTION_YUY2 or EMOTION_BGRA).

Since evas doesn't support YUY2, I use a yuy2->bgra converter. This
converter should be rewritten: it is really not optimized, and it is under
the GPLv2 while emotion is under a BSD license. I added an explicit comment
above the function about that, but it would be better to use a converter
under a BSD license. If it's really incompatible with the emotion license,
I will remove it.

SVN revision: 15675
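To make the vtable change concrete, here is a minimal caller sketch. It uses only the entry points this commit adds or renames (format_get, video_data_size_get, yuv_rows_get, bgra_data_get) and assumes the Emotion_Video_Module declaration from emotion's private headers, which this diff does not show; display_yv12() and display_bgra() are hypothetical stand-ins for the evas-side blit, and the h/2 chroma row counts assume YV12's 2x2 subsampling. Note that for YUY2 streams this xine module reports EMOTION_BGRA, because it converts internally:

#include <stdlib.h>

/* Hypothetical front-end consumer of the reworked module API.
 * display_yv12()/display_bgra() are placeholders, not emotion code. */
static void
render_frame(Emotion_Video_Module *mod, void *ef)
{
   int w = 0, h = 0;

   mod->video_data_size_get(ef, &w, &h);   /* renamed from yuv_size_get */
   switch (mod->format_get(ef))            /* new entry point */
     {
      case EMOTION_YV12:
        {
           /* yuv_rows_get fills caller-provided row-pointer arrays */
           unsigned char **yrows = malloc(h * sizeof(*yrows));
           unsigned char **urows = malloc((h / 2) * sizeof(*urows));
           unsigned char **vrows = malloc((h / 2) * sizeof(*vrows));

           if (yrows && urows && vrows
               && mod->yuv_rows_get(ef, w, h, yrows, urows, vrows))
             display_yv12(w, h, yrows, urows, vrows);
           free(yrows); free(urows); free(vrows);
        }
        break;
      case EMOTION_BGRA: /* what emotion_xine reports for YUY2 input */
        {
           unsigned char *bgra = NULL;

           if (mod->bgra_data_get(ef, &bgra))
             display_bgra(w, h, bgra);
        }
        break;
      default:
        break;
     }
}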
Diffstat (limited to 'legacy/emotion/src/modules')
-rw-r--r--  legacy/emotion/src/modules/emotion_xine.c               40
-rw-r--r--  legacy/emotion/src/modules/emotion_xine.h                2
-rw-r--r--  legacy/emotion/src/modules/xine/emotion_xine_vo_out.c  109
3 files changed, 146 insertions(+), 5 deletions(-)
diff --git a/legacy/emotion/src/modules/emotion_xine.c b/legacy/emotion/src/modules/emotion_xine.c
index d7c225bd0a..d2c53acd50 100644
--- a/legacy/emotion/src/modules/emotion_xine.c
+++ b/legacy/emotion/src/modules/emotion_xine.c
@@ -16,8 +16,10 @@ static double em_pos_get(void *ef);
 static double em_ratio_get(void *ef);
 static int em_seekable(void *ef);
 static void em_frame_done(void *ef);
-static void em_yuv_size_get(void *ef, int *w, int *h);
+static Emotion_Format em_format_get(void *ef);
+static void em_video_data_size_get(void *ef, int *w, int *h);
 static int em_yuv_rows_get(void *ef, int w, int h, unsigned char **yrows, unsigned char **urows, unsigned char **vrows);
+static int em_bgra_data_get(void *ef, unsigned char **bgra_data);
 static void em_event_feed(void *ef, int event);
 static void em_event_mouse_button_feed(void *ef, int button, int x, int y);
 static void em_event_mouse_move_feed(void *ef, int x, int y);
@@ -544,8 +546,21 @@ em_frame_done(void *ef)
     }
 }
 
+static Emotion_Format em_format_get(void *ef)
+{
+   Emotion_Xine_Video *ev;
+   Emotion_Xine_Video_Frame *fr;
+
+   ev = (Emotion_Xine_Video *)ef;
+   fr = ev->cur_frame;
+
+   if (fr)
+     return fr->format;
+   return EMOTION_YV12;
+}
+
 static void
-em_yuv_size_get(void *ef, int *w, int *h)
+em_video_data_size_get(void *ef, int *w, int *h)
 {
    Emotion_Xine_Video *ev;
    Emotion_Xine_Video_Frame *fr;
@@ -583,6 +598,23 @@ em_yuv_rows_get(void *ef, int w, int h, unsigned char **yrows, unsigned char **u
    return 0;
 }
 
+static int
+em_bgra_data_get(void *ef, unsigned char **bgra_data)
+{
+   Emotion_Xine_Video *ev;
+   Emotion_Xine_Video_Frame *fr;
+
+   ev = (Emotion_Xine_Video *)ef;
+   fr = ev->cur_frame;
+   if (!fr) return 0;
+   if (fr->bgra_data)
+     {
+        *bgra_data = fr->bgra_data;
+        return 1;
+     }
+   return 0;
+}
+
 static void
 em_event_feed(void *ef, int event)
 {
@@ -1337,8 +1369,10 @@ static Emotion_Video_Module em_module =
    em_audio_handled, /* audio_handled */
    em_seekable, /* seekable */
    em_frame_done, /* frame_done */
-   em_yuv_size_get, /* yuv_size_get */
+   em_format_get, /* format_get */
+   em_video_data_size_get, /* video_data_size_get */
    em_yuv_rows_get, /* yuv_rows_get */
+   em_bgra_data_get, /* bgra_data_get */
    em_event_feed, /* event_feed */
    em_event_mouse_button_feed, /* event_mouse_button_feed */
    em_event_mouse_move_feed, /* event_mouse_move_feed */
diff --git a/legacy/emotion/src/modules/emotion_xine.h b/legacy/emotion/src/modules/emotion_xine.h
index f796bc9369..47bab1a73a 100644
--- a/legacy/emotion/src/modules/emotion_xine.h
+++ b/legacy/emotion/src/modules/emotion_xine.h
@@ -57,7 +57,9 @@ struct _Emotion_Xine_Video_Frame
 {
    int w, h;
    double ratio;
+   Emotion_Format format;
    unsigned char *y, *u, *v;
+   unsigned char *bgra_data;
    int y_stride, u_stride, v_stride;
    Evas_Object *obj;
    double timestamp;
diff --git a/legacy/emotion/src/modules/xine/emotion_xine_vo_out.c b/legacy/emotion/src/modules/xine/emotion_xine_vo_out.c
index 391f9acfd1..3f90fff7ba 100644
--- a/legacy/emotion/src/modules/xine/emotion_xine_vo_out.c
+++ b/legacy/emotion/src/modules/xine/emotion_xine_vo_out.c
@@ -94,6 +94,8 @@ static void _emotion_overlay_blend (vo_driver_t *vo_driver, vo_f
 static void _emotion_overlay_mem_blend_8 (uint8_t *mem, uint8_t val, uint8_t o, size_t sz);
 static void _emotion_overlay_blend_yuv (uint8_t *dst_base[3], vo_overlay_t * img_overl, int dst_width, int dst_height, int dst_pitches[3]);
 
+static void _emotion_yuy2_to_bgra32 (int width, int height, unsigned char *src, unsigned char *dst);
+
 /***************************************************************************/
 static vo_info_t _emotion_info =
 {
@@ -215,7 +217,7 @@ _emotion_capabilities_get(vo_driver_t *vo_driver)
 
    dv = (Emotion_Driver *)vo_driver;
 // printf("emotion: _emotion_capabilities_get()\n");
-   return VO_CAP_YV12;
+   return VO_CAP_YV12 | VO_CAP_YUY2;
 }
 
 /***************************************************************************/
@@ -358,6 +360,7 @@ _emotion_frame_format_update(vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint3
       {
          int y_size, uv_size;
 
+         fr->frame.format = EMOTION_YV12;
          fr->vo_frame.pitches[0] = 8 * ((width + 7) / 8);
          fr->vo_frame.pitches[1] = 8 * ((width + 15) / 16);
          fr->vo_frame.pitches[2] = 8 * ((width + 15) / 16);
@@ -374,19 +377,49 @@ _emotion_frame_format_update(vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint3
          fr->frame.y = fr->vo_frame.base[0];
          fr->frame.u = fr->vo_frame.base[1];
          fr->frame.v = fr->vo_frame.base[2];
+         fr->frame.bgra_data = NULL;
          fr->frame.y_stride = fr->vo_frame.pitches[0];
          fr->frame.u_stride = fr->vo_frame.pitches[1];
          fr->frame.v_stride = fr->vo_frame.pitches[2];
          fr->frame.obj = dv->ev->obj;
       }
       break;
+      case XINE_IMGFMT_YUY2:
+      {
+         int y_size, uv_size;
+
+         fr->frame.format = EMOTION_BGRA;
+         fr->vo_frame.pitches[0] = 8 * ((width + 3) / 4);
+         fr->vo_frame.pitches[1] = 0;
+         fr->vo_frame.pitches[2] = 0;
+
+         fr->vo_frame.base[0] = malloc(fr->vo_frame.pitches[0] * height);
+         fr->vo_frame.base[1] = NULL;
+         fr->vo_frame.base[2] = NULL;
+
+         fr->frame.w = fr->width;
+         fr->frame.h = fr->height;
+         fr->frame.ratio = fr->vo_frame.ratio;
+         fr->frame.y = NULL;
+         fr->frame.u = NULL;
+         fr->frame.v = NULL;
+         fr->frame.bgra_data = malloc(fr->width * fr->height * 4);
+         fr->frame.y_stride = 0;
+         fr->frame.u_stride = 0;
+         fr->frame.v_stride = 0;
+         fr->frame.obj = dv->ev->obj;
+      }
+      break;
       default:
          break;
    }
    if (((format == XINE_IMGFMT_YV12)
        && ((fr->vo_frame.base[0] == NULL)
            || (fr->vo_frame.base[1] == NULL)
-           || (fr->vo_frame.base[2] == NULL))))
+           || (fr->vo_frame.base[2] == NULL)))
+       || ((format == XINE_IMGFMT_YUY2)
+           && ((fr->vo_frame.base[0] == NULL)
+               || (fr->frame.bgra_data == NULL))))
      {
         _emotion_frame_data_free(fr);
      }
@@ -408,6 +441,11 @@ _emotion_frame_display(vo_driver_t *vo_driver, vo_frame_t *vo_frame)
      {
         void *buf;
         int ret;
+
+        if (fr->format == XINE_IMGFMT_YUY2)
+          {
+             _emotion_yuy2_to_bgra32(fr->width, fr->height, fr->vo_frame.base[0], fr->frame.bgra_data);
+          }
 
         buf = &(fr->frame);
         fr->frame.timestamp = (double)fr->vo_frame.vpts / 90000.0;
@@ -444,6 +482,11 @@ _emotion_frame_data_free(Emotion_Frame *fr)
         fr->frame.u = fr->vo_frame.base[1];
         fr->frame.v = fr->vo_frame.base[2];
      }
+   if (fr->frame.bgra_data)
+     {
+        free(fr->frame.bgra_data);
+        fr->frame.bgra_data = NULL;
+     }
 }
 
 static void
@@ -672,3 +715,65 @@ static void _emotion_overlay_blend_yuv(uint8_t *dst_base[3], vo_overlay_t * img_
         }
      }
 }
+
+/* MoOm:
+ * yuy2 to bgra converter taken from vgrabbj (http://vgrabbj.gecius.de)
+ * This code is under GPLv2. Copyright Jens Gecius.
+ * If it causes problems with the emotion BSD license, tell me, I'll remove it!
+ * TODO: Really need to improve this converter!
+ */
+#define LIMIT(x) ((x) > 0xffff ? 0xff : ((x) <= 0xff ? 0 : ((x) >> 8)))
+
+static void
+_emotion_yuy2_to_bgra32(int width, int height, unsigned char *src, unsigned char *dst)
+{
+   int line, col, linewidth;
+   int y, yy;
+   int u, v;
+   int vr, ug, vg, ub;
+   int r, g, b;
+   unsigned char *py, *pu, *pv;
+
+   linewidth = width - (width >> 1);
+   py = src;
+   pu = src + 1;
+   pv = src + 3;
+
+   y = *py;
+   yy = y << 8;
+   u = *pu - 128;
+   ug = 88 * u;
+   ub = 454 * u;
+   v = *pv - 128;
+   vg = 183 * v;
+   vr = 359 * v;
+
+   for (line = 0; line < height; line++)
+     {
+        for (col = 0; col < width; col++)
+          {
+             r = LIMIT(yy + vr);
+             g = LIMIT(yy - ug - vg);
+             b = LIMIT(yy + ub);
+             *dst++ = b;
+             *dst++ = g;
+             *dst++ = r;
+             *dst++ = 0;
+
+             py += 2;
+             y = *py;
+             yy = y << 8;
+             if ((col & 1) == 1)
+               {
+                  pu += 4; // skip yvy every second y
+                  pv += 4; // skip yuy every second y
+               }
+             u = *pu - 128;
+             ug = 88 * u;
+             ub = 454 * u;
+             v = *pv - 128;
+             vg = 183 * v;
+             vr = 359 * v;
+          }
+     }
+}
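For reference, the magic numbers in that converter are the usual full-range BT.601 YUV->RGB constants in 8.8 fixed point (1.402*256 ≈ 359, 0.344*256 ≈ 88, 0.714*256 ≈ 183, 1.772*256 ≈ 454), and LIMIT() clamps the 16-bit intermediate back down to 8 bits. A clean-room converter written straight from those textbook formulas, which could stand in for the GPLv2 code if the licensing turns out to be a problem, might look like the sketch below. It is illustrative only and not part of this commit; it assumes an even width, since YUY2 packs two pixels per Y0-U-Y1-V macropixel:

#include <stdint.h>

static uint8_t
_clamp8(int v)
{
   if (v < 0) return 0;
   if (v > 255) return 255;
   return (uint8_t)v;
}

static void
yuy2_to_bgra32(int width, int height, const uint8_t *src, uint8_t *dst)
{
   int x, line;

   for (line = 0; line < height; line++)
     {
        for (x = 0; x < width; x += 2)
          {
             /* one YUY2 macropixel (Y0 U Y1 V) -> two BGRA pixels */
             int y0 = src[0], u = src[1] - 128;
             int y1 = src[2], v = src[3] - 128;
             int rd = 359 * v;            /* 1.402 * 256        */
             int gd = -88 * u - 183 * v;  /* -(0.344 U + 0.714 V) * 256 */
             int bd = 454 * u;            /* 1.772 * 256        */

             dst[0] = _clamp8(((y0 << 8) + bd) >> 8);  /* B */
             dst[1] = _clamp8(((y0 << 8) + gd) >> 8);  /* G */
             dst[2] = _clamp8(((y0 << 8) + rd) >> 8);  /* R */
             dst[3] = 0;                               /* A, unused */
             dst[4] = _clamp8(((y1 << 8) + bd) >> 8);
             dst[5] = _clamp8(((y1 << 8) + gd) >> 8);
             dst[6] = _clamp8(((y1 << 8) + rd) >> 8);
             dst[7] = 0;
             src += 4;
             dst += 8;
          }
     }
}

The chroma terms are computed once per macropixel and shared by both luma samples, which is also what the vgrabbj code does with its pu/pv skip logic; a real replacement would still want per-row stride handling and some form of vectorization before it could be called optimized.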