diff --git a/TODO b/TODO
index 8cecc100c..884f13aae 100644
--- a/TODO
+++ b/TODO
@@ -40,6 +40,9 @@ Some of the things (in very short form) that need to be done to E17...
 ESSENTIAL FEATURES
 -------------------------------------------------------------------------------
+* shaded windows should not allow border changes by the user
+* if a border is borderless (padl/r/t/b is 0) it should not be allowed to be
+  shaded
 * add fullscreen support (e16 xvidmode or xrandr style)
 * implement thses maximise/fullscreen modes:
   Which of these should be different modes, and which should be options for
@@ -199,8 +202,6 @@ Some of the things (in very short form) that need to be done to E17...
 OPTIMISATIONS
 -------------------------------------------------------------------------------
-* could speedup dropshadow's gaussian blurr with mmx and sse by doign 2 or 4
-  rows at once
 * optimize clock theme (speed/cpu) (rotation in edje??? or evas???)
 ]]]
diff --git a/src/bin/Makefile.am b/src/bin/Makefile.am
index b128a0eca..ec72385c8 100644
--- a/src/bin/Makefile.am
+++ b/src/bin/Makefile.am
@@ -12,6 +12,7 @@ bin_PROGRAMS = enlightenment enlightenment_remote enlightenment_eapp
 ENLIGHTENMENTHEADERS = \
 e.h \
 e_includes.h \
+e_mmx.h \
 e_ipc_handlers.h \
 e_ipc_handlers_list.h \
 e_user.h \
diff --git a/src/bin/e_border.c b/src/bin/e_border.c
index 80232058a..2de4b202b 100644
--- a/src/bin/e_border.c
+++ b/src/bin/e_border.c
@@ -4886,6 +4886,7 @@ _e_border_menu_show(E_Border *bd, Evas_Coord x, Evas_Coord y, int key)
    e_menu_item_toggle_set(mi, bd->var); \
    e_menu_item_callback_set(mi, _e_border_menu_cb_##var, bd);
 
+   /* FIXME: 1 big menu right now - maybe later make it 3 submenus? */
    NEW_LOCK_MI("Position (User)", lock_user_location);
    NEW_LOCK_MI("Size (User)", lock_user_size);
    NEW_LOCK_MI("Stacking (User)", lock_user_stacking);
diff --git a/src/bin/e_includes.h b/src/bin/e_includes.h
index 4cff9ffe0..c9c561282 100644
--- a/src/bin/e_includes.h
+++ b/src/bin/e_includes.h
@@ -1,6 +1,7 @@
 /*
  * vim:ts=8:sw=3:sts=8:noexpandtab:cino=>5n-3f0^-2{2
  */
+#include "e_mmx.h"
 #include "e_object.h"
 #include "e_user.h"
 #include "e_manager.h"
diff --git a/src/bin/e_mmx.h b/src/bin/e_mmx.h
new file mode 100644
index 000000000..957092435
--- /dev/null
+++ b/src/bin/e_mmx.h
@@ -0,0 +1,647 @@
+/* mmx.h
+
+   MultiMedia eXtensions GCC interface library for IA32.
+
+   To use this library, simply include this header file
+   and compile with GCC. You MUST have inlining enabled
+   in order for mmx_ok() to work; this can be done by
+   simply using -O on the GCC command line.
+
+   Compiling with -DMMX_TRACE will cause detailed trace
+   output to be sent to stderr for each mmx operation.
+   This adds lots of code, and obviously slows execution to
+   a crawl, but can be very useful for debugging.
+
+   THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY
+   EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT
+   LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+   AND FITNESS FOR ANY PARTICULAR PURPOSE.
+
+   1997-98 by H. Dietz and R. Fisher
+
+   History:
+   97-98*  R.Fisher   Early versions
+   980501  R.Fisher   Original Release
+   980611* H.Dietz    Rewrite, correctly implementing inlines, and
+           R.Fisher    including direct register accesses.
+   980616  R.Fisher   Release of 980611 as 980616.
+   980714  R.Fisher   Minor corrections to Makefile, etc.
+   980715  R.Fisher   mmx_ok() now prevents optimizer from using
+                       clobbered values.
+                      mmx_ok() now checks if cpuid instruction is
+                       available before trying to use it.
+   980726* R.Fisher   mm_support() searches for AMD 3DNow, Cyrix
+                       Extended MMX, and standard MMX. It returns a
+                       value which is positive if any of these are
+                       supported, and can be masked with constants to
+                       see which. mmx_ok() is now a call to this
+   980726* R.Fisher   Added i2r support for shift functions
+   980919  R.Fisher   Fixed AMD extended feature recognition bug.
+   980921  R.Fisher   Added definition/check for _MMX_H.
+                      Added "float s[2]" to mmx_t for use with
+                       3DNow and EMMX. So same mmx_t can be used.
+   981013  R.Fisher   Fixed cpuid function 1 bug (looked at wrong reg)
+                      Fixed psllq_i2r error in mmxtest.c
+
+   * Unreleased (internal or interim) versions
+
+   Notes:
+   It appears that the latest gas has the pand problem fixed, therefore
+    I'll undefine BROKEN_PAND by default.
+   String compares may be quicker than the multiple test/jumps in vendor
+    test sequence in mmx_ok(), but I'm not concerned with that right now.
+
+   Acknowledgments:
+   Jussi Laako for pointing out the errors ultimately found to be
+    connected to the failure to notify the optimizer of clobbered values.
+   Roger Hardiman for reminding us that CPUID isn't everywhere, and that
+    someone may actually try to use this on a machine without CPUID.
+    Also for suggesting code for checking this.
+   Robert Dale for pointing out the AMD recognition bug.
+   Jimmy Mayfield and Carl Witty for pointing out the Intel recognition
+    bug.
+   Carl Witty for pointing out the psllq_i2r test bug.
+*/
+
+#ifndef _MMX_H
+#define _MMX_H
+
+/* Warning: at this writing, the version of GAS packaged
+   with most Linux distributions does not handle the
+   parallel AND operation mnemonic correctly. If the
+   symbol BROKEN_PAND is defined, a slower alternative
+   coding will be used. If execution of mmxtest results
+   in an illegal instruction fault, define this symbol.
+*/
+#undef BROKEN_PAND
+
+
+/* The type of a value that fits in an MMX register
+   (note that long long constant values MUST be suffixed
+    by LL and unsigned long long values by ULL, lest
+    they be truncated by the compiler)
+*/
+typedef union {
+	long long          q;      /* Quadword (64-bit) value */
+	unsigned long long uq;     /* Unsigned Quadword */
+	int                d[2];   /* 2 Doubleword (32-bit) values */
+	unsigned int       ud[2];  /* 2 Unsigned Doubleword */
+	short              w[4];   /* 4 Word (16-bit) values */
+	unsigned short     uw[4];  /* 4 Unsigned Word */
+	char               b[8];   /* 8 Byte (8-bit) values */
+	unsigned char      ub[8];  /* 8 Unsigned Byte */
+	float              s[2];   /* 2 Single-precision (32-bit) values */
+} __attribute__ ((aligned (8))) mmx_t;
+
+/* Helper functions for the instruction macros that follow...
+   (note that memory-to-register, m2r, instructions are nearly
+    as efficient as register-to-register, r2r, instructions;
+    however, memory-to-memory instructions are really simulated
+    as a convenience, and are only 1/3 as efficient)
+*/
+
+/* These macros are a lot simpler without the tracing...
+*/
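
As a quick illustration of how mmx_t and the wrapper macros defined just below fit together, here is a minimal sketch; the function name is illustrative only, and paddusb() and emms() are defined further down in this header:

    static void
    add_bytes_saturated(mmx_t *dst, const mmx_t *src)
    {
       /* m2m convenience form: expands to a load, the op, and a store -
        * the "only 1/3 as efficient" case noted above */
       paddusb(*src, *dst);   /* dst->ub[i] = sat(dst->ub[i] + src->ub[i]) */
       emms();                /* leave the FPU usable again afterwards */
    }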
+*/ + +#define mmx_i2r(op, imm, reg) \ + __asm__ __volatile__ (#op " $" #imm ", %%" #reg \ + : /* nothing */ \ + : /* nothing */); + +#define mmx_m2r(op, mem, reg) \ + __asm__ __volatile__ (#op " %0, %%" #reg \ + : /* nothing */ \ + : "m" (mem)) + +#define mmx_r2m(op, reg, mem) \ + __asm__ __volatile__ (#op " %%" #reg ", %0" \ + : "=m" (mem) \ + : /* nothing */ ) + +#define mmx_r2r(op, regs, regd) \ + __asm__ __volatile__ (#op " %" #regs ", %" #regd) + +#define mmx_m2m(op, mems, memd) \ + __asm__ __volatile__ ("movq %0, %%mm0\n\t" \ + #op " %1, %%mm0\n\t" \ + "movq %%mm0, %0" \ + : "=m" (memd) \ + : "m" (mems)) + +/* 1x64 MOVe Quadword + (this is both a load and a store... + in fact, it is the only way to store) +*/ +#define movq_m2r(var, reg) mmx_m2r(movq, var, reg) +#define movq_r2m(reg, var) mmx_r2m(movq, reg, var) +#define movq_r2r(regs, regd) mmx_r2r(movq, regs, regd) +#define movq(vars, vard) \ + __asm__ __volatile__ ("movq %1, %%mm0\n\t" \ + "movq %%mm0, %0" \ + : "=m" (vard) \ + : "m" (vars)) +#define movntq_r2m(reg, var) mmx_r2m(movntq, reg, var) + + +/* 1x32 MOVe Doubleword + (like movq, this is both load and store... + but is most useful for moving things between + mmx registers and ordinary registers) +*/ +#define movd_m2r(var, reg) mmx_m2r(movd, var, reg) +#define movd_r2m(reg, var) mmx_r2m(movd, reg, var) +#define movd_r2r(regs, regd) mmx_r2r(movd, regs, regd) +#define movd(vars, vard) \ + __asm__ __volatile__ ("movd %1, %%mm0\n\t" \ + "movd %%mm0, %0" \ + : "=m" (vard) \ + : "m" (vars)) + + +/* 2x32, 4x16, and 8x8 Parallel ADDs +*/ +#define paddd_m2r(var, reg) mmx_m2r(paddd, var, reg) +#define paddd_r2r(regs, regd) mmx_r2r(paddd, regs, regd) +#define paddd(vars, vard) mmx_m2m(paddd, vars, vard) + +#define paddw_m2r(var, reg) mmx_m2r(paddw, var, reg) +#define paddw_r2r(regs, regd) mmx_r2r(paddw, regs, regd) +#define paddw(vars, vard) mmx_m2m(paddw, vars, vard) + +#define paddb_m2r(var, reg) mmx_m2r(paddb, var, reg) +#define paddb_r2r(regs, regd) mmx_r2r(paddb, regs, regd) +#define paddb(vars, vard) mmx_m2m(paddb, vars, vard) + + +/* 4x16 and 8x8 Parallel ADDs using Saturation arithmetic +*/ +#define paddsw_m2r(var, reg) mmx_m2r(paddsw, var, reg) +#define paddsw_r2r(regs, regd) mmx_r2r(paddsw, regs, regd) +#define paddsw(vars, vard) mmx_m2m(paddsw, vars, vard) + +#define paddsb_m2r(var, reg) mmx_m2r(paddsb, var, reg) +#define paddsb_r2r(regs, regd) mmx_r2r(paddsb, regs, regd) +#define paddsb(vars, vard) mmx_m2m(paddsb, vars, vard) + + +/* 4x16 and 8x8 Parallel ADDs using Unsigned Saturation arithmetic +*/ +#define paddusw_m2r(var, reg) mmx_m2r(paddusw, var, reg) +#define paddusw_r2r(regs, regd) mmx_r2r(paddusw, regs, regd) +#define paddusw(vars, vard) mmx_m2m(paddusw, vars, vard) + +#define paddusb_m2r(var, reg) mmx_m2r(paddusb, var, reg) +#define paddusb_r2r(regs, regd) mmx_r2r(paddusb, regs, regd) +#define paddusb(vars, vard) mmx_m2m(paddusb, vars, vard) + + +/* 2x32, 4x16, and 8x8 Parallel SUBs +*/ +#define psubd_m2r(var, reg) mmx_m2r(psubd, var, reg) +#define psubd_r2r(regs, regd) mmx_r2r(psubd, regs, regd) +#define psubd(vars, vard) mmx_m2m(psubd, vars, vard) + +#define psubw_m2r(var, reg) mmx_m2r(psubw, var, reg) +#define psubw_r2r(regs, regd) mmx_r2r(psubw, regs, regd) +#define psubw(vars, vard) mmx_m2m(psubw, vars, vard) + +#define psubb_m2r(var, reg) mmx_m2r(psubb, var, reg) +#define psubb_r2r(regs, regd) mmx_r2r(psubb, regs, regd) +#define psubb(vars, vard) mmx_m2m(psubb, vars, vard) + + +/* 4x16 and 8x8 Parallel SUBs using Saturation arithmetic +*/ +#define 
+#define psubsw_m2r(var, reg)	mmx_m2r(psubsw, var, reg)
+#define psubsw_r2r(regs, regd)	mmx_r2r(psubsw, regs, regd)
+#define psubsw(vars, vard)	mmx_m2m(psubsw, vars, vard)
+
+#define psubsb_m2r(var, reg)	mmx_m2r(psubsb, var, reg)
+#define psubsb_r2r(regs, regd)	mmx_r2r(psubsb, regs, regd)
+#define psubsb(vars, vard)	mmx_m2m(psubsb, vars, vard)
+
+
+/* 4x16 and 8x8 Parallel SUBs using Unsigned Saturation arithmetic
+*/
+#define psubusw_m2r(var, reg)	mmx_m2r(psubusw, var, reg)
+#define psubusw_r2r(regs, regd)	mmx_r2r(psubusw, regs, regd)
+#define psubusw(vars, vard)	mmx_m2m(psubusw, vars, vard)
+
+#define psubusb_m2r(var, reg)	mmx_m2r(psubusb, var, reg)
+#define psubusb_r2r(regs, regd)	mmx_r2r(psubusb, regs, regd)
+#define psubusb(vars, vard)	mmx_m2m(psubusb, vars, vard)
+
+
+/* 4x16 Parallel MULs giving Low 4x16 portions of results
+*/
+#define pmullw_m2r(var, reg)	mmx_m2r(pmullw, var, reg)
+#define pmullw_r2r(regs, regd)	mmx_r2r(pmullw, regs, regd)
+#define pmullw(vars, vard)	mmx_m2m(pmullw, vars, vard)
+
+
+/* 4x16 Parallel MULs giving High 4x16 portions of results
+*/
+#define pmulhw_m2r(var, reg)	mmx_m2r(pmulhw, var, reg)
+#define pmulhw_r2r(regs, regd)	mmx_r2r(pmulhw, regs, regd)
+#define pmulhw(vars, vard)	mmx_m2m(pmulhw, vars, vard)
+
+
+/* 4x16->2x32 Parallel Mul-ADD
+   (muls like pmullw, then adds adjacent 16-bit fields
+    in the multiply result to make the final 2x32 result)
+*/
+#define pmaddwd_m2r(var, reg)	mmx_m2r(pmaddwd, var, reg)
+#define pmaddwd_r2r(regs, regd)	mmx_r2r(pmaddwd, regs, regd)
+#define pmaddwd(vars, vard)	mmx_m2m(pmaddwd, vars, vard)
+
+
+/* 1x64 bitwise AND
+*/
+#ifdef BROKEN_PAND
+/* emulate pand via pandn: the first pandn (with -1) complements the dest, the second then yields dest & src */
+#define pand_m2r(var, reg) \
+	{ \
+		mmx_m2r(pandn, (mmx_t) -1LL, reg); \
+		mmx_m2r(pandn, var, reg); \
+	}
+#define pand_r2r(regs, regd) \
+	{ \
+		mmx_m2r(pandn, (mmx_t) -1LL, regd); \
+		mmx_r2r(pandn, regs, regd); \
+	}
+#define pand(vars, vard) \
+	{ \
+		movq_m2r(vard, mm0); \
+		mmx_m2r(pandn, (mmx_t) -1LL, mm0); \
+		mmx_m2r(pandn, vars, mm0); \
+		movq_r2m(mm0, vard); \
+	}
+#else
+#define pand_m2r(var, reg)	mmx_m2r(pand, var, reg)
+#define pand_r2r(regs, regd)	mmx_r2r(pand, regs, regd)
+#define pand(vars, vard)	mmx_m2m(pand, vars, vard)
+#endif
+
+
+/* 1x64 bitwise AND with NOT of the destination
+*/
+#define pandn_m2r(var, reg)	mmx_m2r(pandn, var, reg)
+#define pandn_r2r(regs, regd)	mmx_r2r(pandn, regs, regd)
+#define pandn(vars, vard)	mmx_m2m(pandn, vars, vard)
+
+
+/* 1x64 bitwise OR
+*/
+#define por_m2r(var, reg)	mmx_m2r(por, var, reg)
+#define por_r2r(regs, regd)	mmx_r2r(por, regs, regd)
+#define por(vars, vard)		mmx_m2m(por, vars, vard)
+
+
+/* 1x64 bitwise eXclusive OR
+*/
+#define pxor_m2r(var, reg)	mmx_m2r(pxor, var, reg)
+#define pxor_r2r(regs, regd)	mmx_r2r(pxor, regs, regd)
+#define pxor(vars, vard)	mmx_m2m(pxor, vars, vard)
+
+
+/* 2x32, 4x16, and 8x8 Parallel CoMPare for EQuality
+   (resulting fields are either 0 or -1)
+*/
+#define pcmpeqd_m2r(var, reg)	mmx_m2r(pcmpeqd, var, reg)
+#define pcmpeqd_r2r(regs, regd)	mmx_r2r(pcmpeqd, regs, regd)
+#define pcmpeqd(vars, vard)	mmx_m2m(pcmpeqd, vars, vard)
+
+#define pcmpeqw_m2r(var, reg)	mmx_m2r(pcmpeqw, var, reg)
+#define pcmpeqw_r2r(regs, regd)	mmx_r2r(pcmpeqw, regs, regd)
+#define pcmpeqw(vars, vard)	mmx_m2m(pcmpeqw, vars, vard)
+
+#define pcmpeqb_m2r(var, reg)	mmx_m2r(pcmpeqb, var, reg)
+#define pcmpeqb_r2r(regs, regd)	mmx_r2r(pcmpeqb, regs, regd)
+#define pcmpeqb(vars, vard)	mmx_m2m(pcmpeqb, vars, vard)
+
+
+/* 2x32, 4x16, and 8x8 Parallel CoMPare for Greater Than
+   (resulting fields are either 0 or -1)
+*/
+#define pcmpgtd_m2r(var, reg)	mmx_m2r(pcmpgtd, var, reg)
+#define pcmpgtd_r2r(regs, regd)	mmx_r2r(pcmpgtd, regs, regd)
+#define pcmpgtd(vars, vard)	mmx_m2m(pcmpgtd, vars, vard)
+
+#define pcmpgtw_m2r(var, reg)	mmx_m2r(pcmpgtw, var, reg)
+#define pcmpgtw_r2r(regs, regd)	mmx_r2r(pcmpgtw, regs, regd)
+#define pcmpgtw(vars, vard)	mmx_m2m(pcmpgtw, vars, vard)
+
+#define pcmpgtb_m2r(var, reg)	mmx_m2r(pcmpgtb, var, reg)
+#define pcmpgtb_r2r(regs, regd)	mmx_r2r(pcmpgtb, regs, regd)
+#define pcmpgtb(vars, vard)	mmx_m2m(pcmpgtb, vars, vard)
+
+
+/* 1x64, 2x32, and 4x16 Parallel Shift Left Logical
+*/
+#define psllq_i2r(imm, reg)	mmx_i2r(psllq, imm, reg)
+#define psllq_m2r(var, reg)	mmx_m2r(psllq, var, reg)
+#define psllq_r2r(regs, regd)	mmx_r2r(psllq, regs, regd)
+#define psllq(vars, vard)	mmx_m2m(psllq, vars, vard)
+
+#define pslld_i2r(imm, reg)	mmx_i2r(pslld, imm, reg)
+#define pslld_m2r(var, reg)	mmx_m2r(pslld, var, reg)
+#define pslld_r2r(regs, regd)	mmx_r2r(pslld, regs, regd)
+#define pslld(vars, vard)	mmx_m2m(pslld, vars, vard)
+
+#define psllw_i2r(imm, reg)	mmx_i2r(psllw, imm, reg)
+#define psllw_m2r(var, reg)	mmx_m2r(psllw, var, reg)
+#define psllw_r2r(regs, regd)	mmx_r2r(psllw, regs, regd)
+#define psllw(vars, vard)	mmx_m2m(psllw, vars, vard)
+
+
+/* 1x64, 2x32, and 4x16 Parallel Shift Right Logical
+*/
+#define psrlq_i2r(imm, reg)	mmx_i2r(psrlq, imm, reg)
+#define psrlq_m2r(var, reg)	mmx_m2r(psrlq, var, reg)
+#define psrlq_r2r(regs, regd)	mmx_r2r(psrlq, regs, regd)
+#define psrlq(vars, vard)	mmx_m2m(psrlq, vars, vard)
+
+#define psrld_i2r(imm, reg)	mmx_i2r(psrld, imm, reg)
+#define psrld_m2r(var, reg)	mmx_m2r(psrld, var, reg)
+#define psrld_r2r(regs, regd)	mmx_r2r(psrld, regs, regd)
+#define psrld(vars, vard)	mmx_m2m(psrld, vars, vard)
+
+#define psrlw_i2r(imm, reg)	mmx_i2r(psrlw, imm, reg)
+#define psrlw_m2r(var, reg)	mmx_m2r(psrlw, var, reg)
+#define psrlw_r2r(regs, regd)	mmx_r2r(psrlw, regs, regd)
+#define psrlw(vars, vard)	mmx_m2m(psrlw, vars, vard)
+
+
+/* 2x32 and 4x16 Parallel Shift Right Arithmetic
+*/
+#define psrad_i2r(imm, reg)	mmx_i2r(psrad, imm, reg)
+#define psrad_m2r(var, reg)	mmx_m2r(psrad, var, reg)
+#define psrad_r2r(regs, regd)	mmx_r2r(psrad, regs, regd)
+#define psrad(vars, vard)	mmx_m2m(psrad, vars, vard)
+
+#define psraw_i2r(imm, reg)	mmx_i2r(psraw, imm, reg)
+#define psraw_m2r(var, reg)	mmx_m2r(psraw, var, reg)
+#define psraw_r2r(regs, regd)	mmx_r2r(psraw, regs, regd)
+#define psraw(vars, vard)	mmx_m2m(psraw, vars, vard)
+
+
+/* 2x32->4x16 and 4x16->8x8 PACK and Signed Saturate
+   (packs source and dest fields into dest in that order)
+*/
+#define packssdw_m2r(var, reg)	mmx_m2r(packssdw, var, reg)
+#define packssdw_r2r(regs, regd) mmx_r2r(packssdw, regs, regd)
+#define packssdw(vars, vard)	mmx_m2m(packssdw, vars, vard)
+
+#define packsswb_m2r(var, reg)	mmx_m2r(packsswb, var, reg)
+#define packsswb_r2r(regs, regd) mmx_r2r(packsswb, regs, regd)
+#define packsswb(vars, vard)	mmx_m2m(packsswb, vars, vard)
+
+
+/* 4x16->8x8 PACK and Unsigned Saturate
+   (packs source and dest fields into dest in that order)
+*/
+#define packuswb_m2r(var, reg)	mmx_m2r(packuswb, var, reg)
+#define packuswb_r2r(regs, regd) mmx_r2r(packuswb, regs, regd)
+#define packuswb(vars, vard)	mmx_m2m(packuswb, vars, vard)
+
+
+/* 2x32->1x64, 4x16->2x32, and 8x8->4x16 UNPaCK Low
+   (interleaves low half of dest with low half of source
+    as padding in each result field)
+*/
+#define punpckldq_m2r(var, reg)	mmx_m2r(punpckldq, var, reg)
+#define punpckldq_r2r(regs, regd) mmx_r2r(punpckldq, regs, regd)
+#define punpckldq(vars, vard)	mmx_m2m(punpckldq, vars, vard)
+
+#define punpcklwd_m2r(var, reg)	mmx_m2r(punpcklwd, var, reg)
+#define punpcklwd_r2r(regs, regd) mmx_r2r(punpcklwd, regs, regd)
+#define punpcklwd(vars, vard)	mmx_m2m(punpcklwd, vars, vard)
+
+#define punpcklbw_m2r(var, reg)	mmx_m2r(punpcklbw, var, reg)
+#define punpcklbw_r2r(regs, regd) mmx_r2r(punpcklbw, regs, regd)
+#define punpcklbw(vars, vard)	mmx_m2m(punpcklbw, vars, vard)
+
+
+/* 2x32->1x64, 4x16->2x32, and 8x8->4x16 UNPaCK High
+   (interleaves high half of dest with high half of source
+    as padding in each result field)
+*/
+#define punpckhdq_m2r(var, reg)	mmx_m2r(punpckhdq, var, reg)
+#define punpckhdq_r2r(regs, regd) mmx_r2r(punpckhdq, regs, regd)
+#define punpckhdq(vars, vard)	mmx_m2m(punpckhdq, vars, vard)
+
+#define punpckhwd_m2r(var, reg)	mmx_m2r(punpckhwd, var, reg)
+#define punpckhwd_r2r(regs, regd) mmx_r2r(punpckhwd, regs, regd)
+#define punpckhwd(vars, vard)	mmx_m2m(punpckhwd, vars, vard)
+
+#define punpckhbw_m2r(var, reg)	mmx_m2r(punpckhbw, var, reg)
+#define punpckhbw_r2r(regs, regd) mmx_r2r(punpckhbw, regs, regd)
+#define punpckhbw(vars, vard)	mmx_m2m(punpckhbw, vars, vard)
+
+#define MOVE_8DWORDS_MMX(src,dst) \
+	__asm__ ( \
+	"movq (%1), %%mm0 \n" \
+	"movq 0x8(%1), %%mm1 \n" \
+	"movq 0x10(%1), %%mm2 \n" \
+	"movq 0x18(%1), %%mm3 \n" \
+	"movq %%mm0, (%0) \n" \
+	"movq %%mm1, 0x8(%0) \n" \
+	"movq %%mm2, 0x10(%0) \n" \
+	"movq %%mm3, 0x18(%0) \n" \
+	: \
+	: "q" (dst), "r" (src) \
+	: "memory", "st");
+
+#define MOVE_10DWORDS_MMX(src,dst) \
+	__asm__ ( \
+	"movq (%1), %%mm0 \n" \
+	"movq 0x8(%1), %%mm1 \n" \
+	"movq 0x10(%1), %%mm2 \n" \
+	"movq 0x18(%1), %%mm3 \n" \
+	"movq 0x20(%1), %%mm4 \n" \
+	"movq %%mm0, (%0) \n" \
+	"movq %%mm1, 0x8(%0) \n" \
+	"movq %%mm2, 0x10(%0) \n" \
+	"movq %%mm3, 0x18(%0) \n" \
+	"movq %%mm4, 0x20(%0) \n" \
+	: \
+	: "q" (dst), "r" (src) \
+	: "memory", "st");
+
+#define MOVE_16DWORDS_MMX(src,dst) \
+	__asm__ ( \
+	"movq (%1), %%mm0 \n" \
+	"movq 0x8(%1), %%mm1 \n" \
+	"movq 0x10(%1), %%mm2 \n" \
+	"movq 0x18(%1), %%mm3 \n" \
+	"movq 0x20(%1), %%mm4 \n" \
+	"movq 0x28(%1), %%mm5 \n" \
+	"movq 0x30(%1), %%mm6 \n" \
+	"movq 0x38(%1), %%mm7 \n" \
+	"movq %%mm0, (%0) \n" \
+	"movq %%mm1, 0x8(%0) \n" \
+	"movq %%mm2, 0x10(%0) \n" \
+	"movq %%mm3, 0x18(%0) \n" \
+	"movq %%mm4, 0x20(%0) \n" \
+	"movq %%mm5, 0x28(%0) \n" \
+	"movq %%mm6, 0x30(%0) \n" \
+	"movq %%mm7, 0x38(%0) \n" \
+	: \
+	: "q" (dst), "r" (src) \
+	: "memory", "st");
+
+#define MOVE_16DWORDS_MMX2(src,dst) \
+	__asm__ ( \
+	"movq (%1), %%mm0 \n" \
+	"movq 0x8(%1), %%mm1 \n" \
+	"movq 0x10(%1), %%mm2 \n" \
+	"movq 0x18(%1), %%mm3 \n" \
+	"movq 0x20(%1), %%mm4 \n" \
+	"movq 0x28(%1), %%mm5 \n" \
+	"movq 0x30(%1), %%mm6 \n" \
+	"movq 0x38(%1), %%mm7 \n" \
+	"movntq %%mm0, (%0) \n" \
+	"movntq %%mm1, 0x8(%0) \n" \
+	"movntq %%mm2, 0x10(%0) \n" \
+	"movntq %%mm3, 0x18(%0) \n" \
+	"movntq %%mm4, 0x20(%0) \n" \
+	"movntq %%mm5, 0x28(%0) \n" \
+	"movntq %%mm6, 0x30(%0) \n" \
+	"movntq %%mm7, 0x38(%0) \n" \
+	: \
+	: "q" (dst), "r" (src) \
+	: "memory", "st");
+
+/* Empty MMX State
+   (used to clean up when going from mmx to float use
+    of the registers that are shared by both; note that
+    there is no float-to-mmx operation needed, because
+    only the float tag word info is corruptible)
+*/
+
+#define emms() __asm__ __volatile__ ("emms":::"memory")
+#define sfence() __asm__ __volatile__ ("sfence":::"memory")
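
As the "Empty MMX State" note above says, every MMX sequence has to finish with emms() before any floating-point code runs again, because the MMX registers alias the x87 stack. A minimal register-form sketch using only macros from this header (the helper name is illustrative, not part of the patch):

    static void
    copy_quadword(mmx_t *dst, const mmx_t *src)
    {
       movq_m2r(*src, mm0);   /* 64-bit load into %mm0 */
       movq_r2m(mm0, *dst);   /* 64-bit store back out */
       emms();                /* restore the FPU tag word */
    }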
+
+/* additions to detect mmx - */
+/* Raster */
+
+#define CPUID_MMX  (1 << 23)  /* flags: mmx */
+#define CPUID_SSE  (1 << 25)  /* flags: xmm */
+#define CPUID_SSE2 (1 << 26)  /* flags: ? */
+
+#ifdef __amd64
+#define have_cpuid(cpuid_ret) \
+	__asm__ __volatile__ ( \
+	".align 32                \n" \
+	"  pushq %%rbx            \n" \
+	"  pushfq                 \n" \
+	"  popq %%rax             \n" \
+	"  movq %%rax, %%rbx      \n" \
+	"  xorq $0x200000, %%rax  \n" \
+	"  pushq %%rax            \n" \
+	"  popfq                  \n" \
+	"  pushfq                 \n" \
+	"  popq %%rax             \n" \
+	"  cmpq %%rax, %%rbx      \n" \
+	"  je 1f                  \n" \
+	"  movl $1, %0            \n" \
+	"  jmp 2f                 \n" \
+	"1:                       \n" \
+	"  movl $0, %0            \n" \
+	"2:                       \n" \
+	"  popq %%rbx             \n" \
+	: "=m" (cpuid_ret) \
+	);
+
+#define get_cpuid(cpuid_ret) \
+	__asm__ __volatile__ ( \
+	".align 32                \n" \
+	"  pushq %%rax            \n" \
+	"  movl $1, %%eax         \n" \
+	"  cpuid                  \n" \
+	"  test $0x00800000, %%edx\n" \
+	"  jz 1f                  \n" \
+	"  movl %%edx, %0         \n" \
+	"  jmp 2f                 \n" \
+	"1:                       \n" \
+	"  movl $0, %0            \n" \
+	"2:                       \n" \
+	"  popq %%rax             \n" \
+	: "=m" (cpuid_ret) \
+	);
+#else
+#define have_cpuid(cpuid_ret) \
+	__asm__ __volatile__ ( \
+	".align 32                \n" \
+	"  pushl %%ebx            \n" \
+	"  pushfl                 \n" \
+	"  popl %%eax             \n" \
+	"  movl %%eax, %%ebx      \n" \
+	"  xorl $0x200000, %%eax  \n" \
+	"  pushl %%eax            \n" \
+	"  popfl                  \n" \
+	"  pushfl                 \n" \
+	"  popl %%eax             \n" \
+	"  cmpl %%eax, %%ebx      \n" \
+	"  je 1f                  \n" \
+	"  movl $1, %0            \n" \
+	"  jmp 2f                 \n" \
+	"1:                       \n" \
+	"  movl $0, %0            \n" \
+	"2:                       \n" \
+	"  popl %%ebx             \n" \
+	: "=m" (cpuid_ret) \
+	);
+
+#define get_cpuid(cpuid_ret) \
+	__asm__ __volatile__ ( \
+	".align 32                \n" \
+	"  pushl %%eax            \n" \
+	"  movl $1, %%eax         \n" \
+	"  cpuid                  \n" \
+	"  test $0x00800000, %%edx\n" \
+	"  jz 1f                  \n" \
+	"  movl %%edx, %0         \n" \
+	"  jmp 2f                 \n" \
+	"1:                       \n" \
+	"  movl $0, %0            \n" \
+	"2:                       \n" \
+	"  popl %%eax             \n" \
+	: "=m" (cpuid_ret) \
+	);
+#endif
+
+/* P3 instructions - need to figure out how to detect? (these came in with SSE, so the CPUID_SSE bit above should cover them) */
+#define prefetch(var) \
+	__asm__ __volatile__ ( \
+	"prefetchnta (%0) \n" \
+	: \
+	: "r" (var) \
+	);
+#define prefetch0(var) \
+	__asm__ __volatile__ ( \
+	"prefetcht0 (%0) \n" \
+	: \
+	: "r" (var) \
+	);
+#define prefetch1(var) \
+	__asm__ __volatile__ ( \
+	"prefetcht1 (%0) \n" \
+	: \
+	: "r" (var) \
+	);
+#define prefetch2(var) \
+	__asm__ __volatile__ ( \
+	"prefetcht2 (%0) \n" \
+	: \
+	: "r" (var) \
+	);
+#define pshufw(r1, r2, imm) \
+	__asm__ __volatile__ ( \
+	"pshufw $" #imm ", %" #r1 ", %" #r2 " \n" \
+	);
+
+/* end additions */
+
+#endif
+
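The dropshadow change below notes that runtime MMX detection is still missing. One possible gate, using only the macros above and assuming get_cpuid() leaves the cpuid(1) edx feature flags (or 0) in its argument; the helper name is hypothetical, not part of this patch:

    #include "e_mmx.h"

    static int
    _ds_mmx_ok(void)
    {
       int id = 0, flags = 0;

       have_cpuid(id);      /* 1 if the cpuid instruction is usable */
       if (!id) return 0;
       get_cpuid(flags);    /* cpuid(1) edx feature flags, or 0 */
       return (flags & CPUID_MMX) ? 1 : 0;
    }
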
diff --git a/src/modules/dropshadow/e_mod_main.c b/src/modules/dropshadow/e_mod_main.c
index b061ec772..863151b31 100644
--- a/src/modules/dropshadow/e_mod_main.c
+++ b/src/modules/dropshadow/e_mod_main.c
@@ -1,6 +1,11 @@
 #include "e.h"
 #include "e_mod_main.h"
 
+/* I measure a mere 9% speedup using mmx for simd sums. :(
+ * need to detect mmx capable CPUs to enable this though
+#define MMX 1
+*/
+
 /* TODO List:
  *
  * * bug in shadow_x < 0 and shadow_y < 0 needs to be fixed (not urgent though)
@@ -99,6 +104,23 @@ e_modapi_init(E_Module *m)
      }
    ds = _ds_init(m);
    m->config_menu = _ds_config_menu_new(ds);
+/*
+     {
+	Shpix *sh;
+	double t1, t2;
+	int i;
+
+	sh = _ds_shpix_new(1000, 1000);
+	t1 = ecore_time_get();
+	for (i = 0; i < 100; i++)
+	  {
+	     _ds_shpix_blur(sh, 0, 0, 1000, 100, ds->table.gauss, ds->conf->blur_size);
+	  }
+	t2 = ecore_time_get();
+	printf("blur time: %3.3f\n", t2 - t1);
+	_ds_shpix_free(sh);
+     }
+ */
    return ds;
 }
 
@@ -1470,12 +1492,36 @@ _ds_gauss_blur_h(unsigned char *pix, unsigned char *pix_dst, int pix_w, int pix_
    for (y = ry; y < ryy; y++)
      {
 	p1 = pp;
+#ifdef MMX
+	/* sum 4 pixels at once */
+	pxor_r2r(mm7, mm7); // mm7 = 00000000
+	pxor_r2r(mm2, mm2); // mm2 = 00000000
+	for (l = 0; l <= (l2 - 3); l += 4)
+	  {
+	     movd_m2r(((int *)p1)[0], mm0);        // mm0 = 0000abcd
+	     movd_m2r(((int *)(&lut[l]))[0], mm1); // mm1 = 0000wxyz
+	     punpcklbw_r2r(mm2, mm0);              // mm0 = 0a0b0c0d
+	     punpcklbw_r2r(mm2, mm1);              // mm1 = 0w0x0y0z
+	     pmaddwd_r2r(mm0, mm1); // mm1 = (a * w) + (b * x) | (c * y) + (d * z)
+	     paddd_r2r(mm1, mm7);   // mm7 += (c * y) + (d * z)
+	     psrlq_i2r(32, mm1);    // mm1 = 0000 | (a * w) + (b * x)
+	     paddd_r2r(mm1, mm7);   // mm7 += (a * w) + (b * x)
+	     p1 += 4;
+	  }
+	movd_r2m(mm7, sum); // sum = mm7
+	for (; l <= l2; l++)
+	  {
+	     sum += (int)(*p1) * (int)lut[l];
+	     p1++;
+	  }
+#else
 	sum = 0;
 	for (l = 0; l <= l2; l++)
 	  {
 	     sum += (int)(*p1) * (int)lut[l];
 	     p1++;
 	  }
+#endif
 	*p2 = sum / full;
 	p2 += pix_w;
 	pp += pix_w;
@@ -1501,13 +1547,16 @@ _ds_gauss_blur_h(unsigned char *pix, unsigned char *pix_dst, int pix_w, int pix_
 	  }
      }
 }
+#ifdef MMX
+   emms();
+#endif
 }
 
 static void
 _ds_gauss_blur_v(unsigned char *pix, unsigned char *pix_dst, int pix_w, int pix_h, unsigned char *lut, int blur, int rx, int ry, int rxx, int ryy)
 {
    int x, y;
-   int i, sum, weight, l, l1, l2, wt, y1, y2;
+   int i, sum, weight, l, l1, l2, wt, y1, y2, tpix;
    unsigned char *p1, *p2, *pp;
    int full, usefull;
 
@@ -1542,12 +1591,39 @@ _ds_gauss_blur_v(unsigned char *pix, unsigned char *pix_dst, int pix_w, int pix_
    for (x = rx; x < rxx; x++)
      {
 	p1 = pp;
+#ifdef MMX
+	/* sum 4 pixels at once */
+	pxor_r2r(mm7, mm7); // mm7 = 00000000
+	pxor_r2r(mm2, mm2); // mm2 = 00000000
+	for (l = 0; l <= (l2 - 3); l += 4)
+	  {
+	     tpix = (p1[0]);        p1 += pix_w;
+	     tpix |= (p1[0] << 8);  p1 += pix_w;
+	     tpix |= (p1[0] << 16); p1 += pix_w;
+	     tpix |= (p1[0] << 24); p1 += pix_w;
+	     movd_m2r(tpix, mm0);                  // mm0 = 0000abcd
+	     movd_m2r(((int *)(&lut[l]))[0], mm1); // mm1 = 0000wxyz
+	     punpcklbw_r2r(mm2, mm0);              // mm0 = 0a0b0c0d
+	     punpcklbw_r2r(mm2, mm1);              // mm1 = 0w0x0y0z
+	     pmaddwd_r2r(mm0, mm1); // mm1 = (a * w) + (b * x) | (c * y) + (d * z)
+	     paddd_r2r(mm1, mm7);   // mm7 += (c * y) + (d * z)
+	     psrlq_i2r(32, mm1);    // mm1 = 0000 | (a * w) + (b * x)
+	     paddd_r2r(mm1, mm7);   // mm7 += (a * w) + (b * x)
+	  }
+	movd_r2m(mm7, sum); // sum = mm7
+	for (; l <= l2; l++)
+	  {
+	     sum += (int)(*p1) * (int)lut[l];
+	     p1 += pix_w;
+	  }
+#else
 	sum = 0;
 	for (l = 0; l <= l2; l++)
 	  {
 	     sum += (int)(*p1) * (int)lut[l];
 	     p1 += pix_w;
 	  }
+#endif
 	*p2 = sum / full;
 	p2++;
 	pp++;
@@ -1573,6 +1649,9 @@ _ds_gauss_blur_v(unsigned char *pix, unsigned char *pix_dst, int pix_w, int pix_
 	  }
      }
 }
+#ifdef MMX
+   emms();
+#endif
 }
 
 static Shpix *
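For reference, the SIMD inner loops above hinge on pmaddwd: it multiplies four 16-bit pairs and sums adjacent products into two 32-bit lanes, and the paddd/psrlq/paddd triple then folds both lanes into one accumulator. Per group of four taps that is equivalent to this scalar step (illustrative helper, not part of the patch):

    static inline int
    madd4(const unsigned char *px, const unsigned char *wt)
    {
       /* pmaddwd: two paired products per 32-bit lane */
       int lo = px[0] * wt[0] + px[1] * wt[1];
       int hi = px[2] * wt[2] + px[3] * wt[3];
       /* paddd + psrlq(32) + paddd: fold both lanes into the running sum */
       return lo + hi;
    }
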
@@ -1765,7 +1844,7 @@ _ds_shpix_object_set(Shpix *sp, Evas_Object *o, int x, int y, int w, int h)
 {
    unsigned char *p;
    unsigned int *pix2, *p2;
-   int xx, yy, jump;
+   int xx, yy, jump, pix;
 
    if (!sp) return;
    if (!o) return;
@@ -1799,17 +1878,42 @@ _ds_shpix_object_set(Shpix *sp, Evas_Object *o, int x, int y, int w, int h)
 	p2 = pix2;
 	for (yy = 0; yy < h; yy++)
 	  {
+#ifdef MMX
+	     /* expand 2 pixels at once */
+	     for (xx = 0; xx < (w - 1); xx += 2)
+	       {
+		  pix = (p[1] << 24) | (p[0] << 8);
+		  movd_m2r(pix, mm1);      // mm1 = A0a0
+		  pxor_r2r(mm0, mm0);      // mm0 = 00000000
+		  punpcklbw_r2r(mm1, mm0); // mm0 = A000a000
+		  movq_r2m(mm0, p2[0]);    // *p2 = mm0;
+		  p2 += 2;
+		  p += 2;
+	       }
+	     for (; xx < w; xx++)
+	       {
+		  *p2 = ((*p) << 24);
+		  p2++;
+		  p++;
+	       }
+#else
 	     for (xx = 0; xx < w; xx++)
 	       {
		  *p2 = ((*p) << 24);
		  p2++;
		  p++;
	       }
+#endif
	     p += jump;
	  }
	evas_object_image_data_set(o, pix2);
	evas_object_image_data_update_add(o, 0, 0, w, h);
      }
+#ifdef MMX
+   /* we did mmx stuff - clean up */
+   emms();
+#endif
+
 }
 
 static void