Merge commit '930e26a3ea9d223e04bac4cdde13697cec770031'

* commit '930e26a3ea9d223e04bac4cdde13697cec770031':
  x86: h264qpel: Only define mmxext QPEL functions if H264QPEL is enabled
  x86: PABSW: port to cpuflags
  x86: vc1dsp: port to cpuflags
  rtmp: Use av_strlcat instead of strncat

Conflicts:
	libavcodec/x86/h264_qpel.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit e859339e7a
Michael Niedermayer, 2012-11-05 22:36:05 +01:00
4 changed files with 55 additions and 59 deletions
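A note on the last subject in the list above ("rtmp: Use av_strlcat instead of strncat"): strncat()'s size argument bounds the number of bytes appended, not the total capacity of the destination, so passing the buffer size still overflows once the buffer is partially filled. av_strlcat() from libavutil/avstring.h takes the full destination size and always NUL-terminates, truncating instead of overrunning. A minimal sketch of the pattern, using a hypothetical helper rather than the actual rtmp.c call site:

```c
#include <stdio.h>
#include "libavutil/avstring.h"

/* Hypothetical helper, not the real rtmp.c code. */
static void build_url(char *buf, size_t size,
                      const char *host, const char *app)
{
    snprintf(buf, size, "rtmp://%s/", host);
    /* Buggy pattern: strncat(buf, app, size) may write up to
     * strlen(buf) + size + 1 bytes into a size-byte buffer.  */
    av_strlcat(buf, app, size); /* bounded by total buffer size */
}
```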

libavcodec/x86/h264_qpel.c

@@ -1286,6 +1286,6 @@ QPEL16_OP(mc31, MMX)\
 QPEL16_OP(mc32, MMX)\
 QPEL16_OP(mc33, MMX)
-#if CONFIG_H264QPEL && ARCH_X86_32 && HAVE_YASM // ARCH_X86_64 implies sse2+
+#if ARCH_X86_32 && HAVE_YASM && CONFIG_H264QPEL // ARCH_X86_64 implies SSE2+
 QPEL16(mmxext)
 #endif
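The guard above is worth unpacking: the MMXEXT QPEL tier is compiled only when the h264qpel component is configured in (CONFIG_H264QPEL), only on 32-bit x86, and only when the assembler is available; x86-64 is excluded because every 64-bit x86 CPU has SSE2, so the SSE2+ versions are always used there. A hypothetical sketch of the same pattern (function name invented for illustration):

```c
#include <stdint.h>
#include <stddef.h>

/* Compile the legacy SIMD tier only where it can ever be used. */
#if CONFIG_H264QPEL && ARCH_X86_32 && HAVE_YASM
static void qpel16_copy_mmxext(uint8_t *dst, const uint8_t *src,
                               ptrdiff_t stride, int h)
{
    /* ... MMXEXT implementation would live here ... */
}
#endif
```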

libavcodec/x86/vc1dsp.asm

@@ -34,7 +34,13 @@ section .text
 punpckl%1 m%2, m%4
 %endmacro
-%macro STORE_4_WORDS_MMX 6
+%macro STORE_4_WORDS 6
+%if cpuflag(sse4)
+pextrw %1, %5, %6+0
+pextrw %2, %5, %6+1
+pextrw %3, %5, %6+2
+pextrw %4, %5, %6+3
+%else
 movd %6d, %5
 %if mmsize==16
 psrldq %5, 4
@@ -48,13 +54,7 @@ section .text
 mov %3, %6w
 shr %6, 16
 mov %4, %6w
-%endmacro
-%macro STORE_4_WORDS_SSE4 6
-pextrw %1, %5, %6+0
-pextrw %2, %5, %6+1
-pextrw %3, %5, %6+2
-pextrw %4, %5, %6+3
+%endif
 %endmacro
 ; in: p1 p0 q0 q1, clobbers p0
@@ -200,14 +200,14 @@ section .text
 VC1_FILTER %1
 punpcklbw m0, m1
 %if %0 > 1
-STORE_4_WORDS_MMX [r0-1], [r0+r1-1], [r0+2*r1-1], [r0+r3-1], m0, %2
+STORE_4_WORDS [r0-1], [r0+r1-1], [r0+2*r1-1], [r0+r3-1], m0, %2
 %if %1 > 4
 psrldq m0, 4
-STORE_4_WORDS_MMX [r4-1], [r4+r1-1], [r4+2*r1-1], [r4+r3-1], m0, %2
+STORE_4_WORDS [r4-1], [r4+r1-1], [r4+2*r1-1], [r4+r3-1], m0, %2
 %endif
 %else
-STORE_4_WORDS_SSE4 [r0-1], [r0+r1-1], [r0+2*r1-1], [r0+r3-1], m0, 0
-STORE_4_WORDS_SSE4 [r4-1], [r4+r1-1], [r4+2*r1-1], [r4+r3-1], m0, 4
+STORE_4_WORDS [r0-1], [r0+r1-1], [r0+2*r1-1], [r0+r3-1], m0, 0
+STORE_4_WORDS [r4-1], [r4+r1-1], [r4+2*r1-1], [r4+r3-1], m0, 4
 %endif
 %endmacro
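For readers less fluent in x86 macro assembly: the merged STORE_4_WORDS writes four 16-bit words to four separate addresses, either with SSE4.1's pextrw-to-memory form or, on older CPUs, by bouncing dwords through a scalar register. The same two strategies sketched with intrinsics (illustrative C, not FFmpeg code; the register-form _mm_extract_epi16 stands in for the store-to-memory pextrw):

```c
#include <stdint.h>
#include <emmintrin.h> /* SSE2 intrinsics */

/* pextrw-style path: extract words 0..3 of v directly. */
static void store_4_words_extract(uint16_t *d0, uint16_t *d1,
                                  uint16_t *d2, uint16_t *d3, __m128i v)
{
    *d0 = (uint16_t)_mm_extract_epi16(v, 0);
    *d1 = (uint16_t)_mm_extract_epi16(v, 1);
    *d2 = (uint16_t)_mm_extract_epi16(v, 2);
    *d3 = (uint16_t)_mm_extract_epi16(v, 3);
}

/* Fallback path, mirroring the %else branch: move the low dword to a
 * scalar register, store a word, shift, store the next, then psrldq. */
static void store_4_words_scalar(uint16_t *d0, uint16_t *d1,
                                 uint16_t *d2, uint16_t *d3, __m128i v)
{
    uint32_t lo = (uint32_t)_mm_cvtsi128_si32(v); /* movd %6d, %5 */
    *d0 = (uint16_t)lo;                           /* mov %1, %6w  */
    *d1 = (uint16_t)(lo >> 16);                   /* shr %6, 16   */
    v   = _mm_srli_si128(v, 4);                   /* psrldq %5, 4 */
    lo  = (uint32_t)_mm_cvtsi128_si32(v);
    *d2 = (uint16_t)lo;
    *d3 = (uint16_t)(lo >> 16);
}
```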
@@ -228,92 +228,90 @@ section .text
 imul r2, 0x01010101
 %endmacro
-%macro VC1_LF_MMX 1
-INIT_MMX
-cglobal vc1_v_loop_filter_internal_%1
+%macro VC1_LF 0
+cglobal vc1_v_loop_filter_internal
 VC1_V_LOOP_FILTER 4, d
 ret
-cglobal vc1_h_loop_filter_internal_%1
+cglobal vc1_h_loop_filter_internal
 VC1_H_LOOP_FILTER 4, r4
 ret
-; void ff_vc1_v_loop_filter4_mmx2(uint8_t *src, int stride, int pq)
-cglobal vc1_v_loop_filter4_%1, 3,5,0
+; void ff_vc1_v_loop_filter4_mmxext(uint8_t *src, int stride, int pq)
+cglobal vc1_v_loop_filter4, 3,5,0
 START_V_FILTER
-call vc1_v_loop_filter_internal_%1
+call vc1_v_loop_filter_internal
 RET
-; void ff_vc1_h_loop_filter4_mmx2(uint8_t *src, int stride, int pq)
-cglobal vc1_h_loop_filter4_%1, 3,5,0
+; void ff_vc1_h_loop_filter4_mmxext(uint8_t *src, int stride, int pq)
+cglobal vc1_h_loop_filter4, 3,5,0
 START_H_FILTER 4
-call vc1_h_loop_filter_internal_%1
+call vc1_h_loop_filter_internal
 RET
-; void ff_vc1_v_loop_filter8_mmx2(uint8_t *src, int stride, int pq)
-cglobal vc1_v_loop_filter8_%1, 3,5,0
+; void ff_vc1_v_loop_filter8_mmxext(uint8_t *src, int stride, int pq)
+cglobal vc1_v_loop_filter8, 3,5,0
 START_V_FILTER
-call vc1_v_loop_filter_internal_%1
+call vc1_v_loop_filter_internal
 add r4, 4
 add r0, 4
-call vc1_v_loop_filter_internal_%1
+call vc1_v_loop_filter_internal
 RET
-; void ff_vc1_h_loop_filter8_mmx2(uint8_t *src, int stride, int pq)
-cglobal vc1_h_loop_filter8_%1, 3,5,0
+; void ff_vc1_h_loop_filter8_mmxext(uint8_t *src, int stride, int pq)
+cglobal vc1_h_loop_filter8, 3,5,0
 START_H_FILTER 4
-call vc1_h_loop_filter_internal_%1
+call vc1_h_loop_filter_internal
 lea r0, [r0+4*r1]
-call vc1_h_loop_filter_internal_%1
+call vc1_h_loop_filter_internal
 RET
 %endmacro
-%define PABSW PABSW_MMXEXT
-VC1_LF_MMX mmx2
+INIT_MMX mmxext
+VC1_LF
-INIT_XMM
+INIT_XMM sse2
 ; void ff_vc1_v_loop_filter8_sse2(uint8_t *src, int stride, int pq)
-cglobal vc1_v_loop_filter8_sse2, 3,5,8
+cglobal vc1_v_loop_filter8, 3,5,8
 START_V_FILTER
 VC1_V_LOOP_FILTER 8, q
 RET
 ; void ff_vc1_h_loop_filter8_sse2(uint8_t *src, int stride, int pq)
-cglobal vc1_h_loop_filter8_sse2, 3,6,8
+cglobal vc1_h_loop_filter8, 3,6,8
 START_H_FILTER 8
 VC1_H_LOOP_FILTER 8, r5
 RET
-%define PABSW PABSW_SSSE3
-INIT_MMX
+INIT_MMX ssse3
 ; void ff_vc1_v_loop_filter4_ssse3(uint8_t *src, int stride, int pq)
-cglobal vc1_v_loop_filter4_ssse3, 3,5,0
+cglobal vc1_v_loop_filter4, 3,5,0
 START_V_FILTER
 VC1_V_LOOP_FILTER 4, d
 RET
 ; void ff_vc1_h_loop_filter4_ssse3(uint8_t *src, int stride, int pq)
-cglobal vc1_h_loop_filter4_ssse3, 3,5,0
+cglobal vc1_h_loop_filter4, 3,5,0
 START_H_FILTER 4
 VC1_H_LOOP_FILTER 4, r4
 RET
-INIT_XMM
+INIT_XMM ssse3
 ; void ff_vc1_v_loop_filter8_ssse3(uint8_t *src, int stride, int pq)
-cglobal vc1_v_loop_filter8_ssse3, 3,5,8
+cglobal vc1_v_loop_filter8, 3,5,8
 START_V_FILTER
 VC1_V_LOOP_FILTER 8, q
 RET
 ; void ff_vc1_h_loop_filter8_ssse3(uint8_t *src, int stride, int pq)
-cglobal vc1_h_loop_filter8_ssse3, 3,6,8
+cglobal vc1_h_loop_filter8, 3,6,8
 START_H_FILTER 8
 VC1_H_LOOP_FILTER 8, r5
 RET
+INIT_XMM sse4
 ; void ff_vc1_h_loop_filter8_sse4(uint8_t *src, int stride, int pq)
-cglobal vc1_h_loop_filter8_sse4, 3,5,8
+cglobal vc1_h_loop_filter8, 3,5,8
 START_H_FILTER 8
 VC1_H_LOOP_FILTER 8
 RET
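All the churn in this hunk is the x86inc cpuflags idiom: INIT_MMX mmxext or INIT_XMM sse2 sets the register class plus an instruction-set level, cglobal then derives the public name automatically (cglobal vc1_v_loop_filter4 under INIT_MMX mmxext still emits ff_vc1_v_loop_filter4_mmxext), and shared macros branch on cpuflag(...) instead of existing in per-ISA copies. A rough C-preprocessor analogue of that single-template, many-instances idea (all names hypothetical):

```c
#include <stdint.h>

#define HAVE_FAST_STORE_mmxext 0 /* plays the role of cpuflag(sse4) */
#define HAVE_FAST_STORE_sse4   1

#define DEFINE_LOOP_FILTER(isa)                                   \
static void vc1_loop_filter_ ## isa(uint8_t *src, int stride)     \
{                                                                 \
    if (HAVE_FAST_STORE_ ## isa) {                                \
        /* fast store path (pextrw-style) */                      \
    } else {                                                      \
        /* baseline store path (movd + shifts) */                 \
    }                                                             \
    (void)src; (void)stride;                                      \
}

DEFINE_LOOP_FILTER(mmxext) /* emits vc1_loop_filter_mmxext */
DEFINE_LOOP_FILTER(sse4)   /* emits vc1_loop_filter_sse4   */
```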

libavcodec/x86/vc1dsp_init.c

@@ -49,7 +49,7 @@ static void vc1_h_loop_filter16_ ## EXT(uint8_t *src, int stride, int pq) \
 }
 #if HAVE_YASM
-LOOP_FILTER(mmx2)
+LOOP_FILTER(mmxext)
 LOOP_FILTER(sse2)
 LOOP_FILTER(ssse3)
@@ -98,7 +98,7 @@ av_cold void ff_vc1dsp_init_x86(VC1DSPContext *dsp)
 }
 if (mm_flags & AV_CPU_FLAG_MMXEXT) {
-    ASSIGN_LF(mmx2);
+    ASSIGN_LF(mmxext);
     dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_mmx2;
 } else if (mm_flags & AV_CPU_FLAG_3DNOW) {
     dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_3dnow;
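The init file is the other half of the mechanism: the assembler emits one symbol per instruction-set level, and at runtime the DSP context is pointed at the best variant the host CPU reports. A condensed sketch of that dispatch with the prototypes spelled out (the real ff_vc1dsp_init_x86() assigns many more pointers via the ASSIGN_LF() macro shown above):

```c
#include <stdint.h>
#include "libavutil/cpu.h"
#include "vc1dsp.h"

void ff_vc1_v_loop_filter4_mmxext(uint8_t *src, int stride, int pq);
void ff_vc1_v_loop_filter4_ssse3 (uint8_t *src, int stride, int pq);

static void init_vc1_loop_filters(VC1DSPContext *dsp)
{
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMXEXT)
        dsp->vc1_v_loop_filter4 = ff_vc1_v_loop_filter4_mmxext;
    if (mm_flags & AV_CPU_FLAG_SSSE3) /* later, stronger levels win */
        dsp->vc1_v_loop_filter4 = ff_vc1_v_loop_filter4_ssse3;
}
```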

libavutil/x86/x86util.asm

@@ -145,13 +145,21 @@
 %endif
 %endmacro
-; PABSW macros assume %1 != %2, while ABS1/2 macros work in-place
-%macro PABSW_MMX 2
+; PABSW macro assumes %1 != %2, while ABS1/2 macros work in-place
+%macro PABSW 2
+%if cpuflag(ssse3)
+pabsw %1, %2
+%elif cpuflag(mmxext)
+pxor %1, %1
+psubw %1, %2
+pmaxsw %1, %2
+%else
 pxor %1, %1
 pcmpgtw %1, %2
 pxor %2, %1
 psubw %2, %1
 SWAP %1, %2
+%endif
 %endmacro
 %macro PSIGNW_MMX 2
@@ -159,16 +167,6 @@
 psubw %1, %2
 %endmacro
-%macro PABSW_MMXEXT 2
-pxor %1, %1
-psubw %1, %2
-pmaxsw %1, %2
-%endmacro
-%macro PABSW_SSSE3 2
-pabsw %1, %2
-%endmacro
 %macro PSIGNW_SSSE3 2
 psignw %1, %2
 %endmacro
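The unified PABSW above folds three generations of the same operation, per-word absolute value, into one macro body selected at expansion time. The three strategies, sketched with SSE intrinsics for readability (illustrative, not FFmpeg code):

```c
#include <emmintrin.h>  /* SSE2  */
#include <tmmintrin.h>  /* SSSE3 */

static __m128i absw_ssse3(__m128i x)  /* pabsw: one instruction */
{
    return _mm_abs_epi16(x);
}

static __m128i absw_mmxext(__m128i x) /* pxor + psubw + pmaxsw */
{
    __m128i neg = _mm_sub_epi16(_mm_setzero_si128(), x); /* 0 - x     */
    return _mm_max_epi16(x, neg);                        /* max(x,-x) */
}

static __m128i absw_mmx(__m128i x)    /* sign-mask trick */
{
    /* m = (x < 0) ? 0xFFFF : 0; then |x| = (x ^ m) - m, the
     * two's-complement conditional-negate identity.         */
    __m128i m = _mm_cmpgt_epi16(_mm_setzero_si128(), x);
    return _mm_sub_epi16(_mm_xor_si128(x, m), m);
}
```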