Drop the "%" prefix from register names in inline-asm clobber lists. This is simpler, consistent with other asm code in the tree and with the GCC documentation, and also works with e.g. tinycc.
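For illustration only, here is a minimal self-contained sketch of the style this patch converts to (the function below is made up and is not part of the patch): register names in the clobber list are written bare, e.g. "eax" instead of "%eax", while references inside the assembler template keep the usual "%%eax" spelling, which this patch does not touch.

/* Hypothetical example, not from the patch: bare register names in the
 * clobber list, as shown in the GCC extended-asm documentation. */
#include <stdint.h>

static inline uint32_t bswap32_inline_asm(uint32_t x)
{
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
    uint32_t out;
    __asm__ (
        "movl  %1, %%eax  \n\t"   /* the template still uses "%%eax" */
        "bswap %%eax      \n\t"
        "movl  %%eax, %0  \n\t"
        : "=r" (out)
        : "r"  (x)
        : "eax"                   /* previously often written "%eax" */
    );
    return out;
#else
    /* portable fallback */
    return (x >> 24) | ((x >> 8) & 0xff00) | ((x << 8) & 0xff0000) | (x << 24);
#endif
}

gcc silently accepts both spellings in the clobber list, which is presumably why the "%"-prefixed variants went unnoticed until building with a stricter compiler such as tinycc.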
Signed-off-by: Reimar Döffinger <reimar.doeffin...@gmx.de> --- libavcodec/msmpeg4.c | 2 +- libavcodec/x86/ac3dsp_init.c | 4 +-- libavcodec/x86/cabac.h | 6 ++-- libavcodec/x86/fdct.c | 4 +-- libavcodec/x86/h264_i386.h | 4 +-- libavcodec/x86/lpc.c | 4 +-- libavcodec/x86/me_cmp_init.c | 4 +-- libavcodec/x86/mpegvideo.c | 8 +++--- libavcodec/x86/mpegvideoenc.c | 4 +-- libavcodec/x86/mpegvideoenc_template.c | 8 +++--- libavcodec/x86/simple_idct.c | 2 +- libavcodec/x86/snowdsp.c | 6 ++-- libavcodec/x86/vc1dsp_mmx.c | 4 +-- libavcodec/x86/xvididct_sse2.c | 10 +++---- libavfilter/libmpcodecs/vf_eq.c | 2 +- libavfilter/libmpcodecs/vf_eq2.c | 2 +- libavfilter/x86/vf_noise.c | 6 ++-- libpostproc/postprocess_template.c | 46 +++++++++++++++--------------- libswscale/x86/hscale_fast_bilinear_simd.c | 8 +++--- libswscale/x86/rgb2rgb_template.c | 22 +++++++------- libswscale/x86/swscale.c | 8 +++--- libswscale/x86/swscale_template.c | 10 +++---- 22 files changed, 87 insertions(+), 87 deletions(-) diff --git a/libavcodec/msmpeg4.c b/libavcodec/msmpeg4.c index 7300af3..c49b8ae 100644 --- a/libavcodec/msmpeg4.c +++ b/libavcodec/msmpeg4.c @@ -237,7 +237,7 @@ int ff_msmpeg4_pred_dc(MpegEncContext *s, int n, "movl %%edx, %2 \n\t" : "+b" (a), "+c" (b), "+D" (c) : "g" (scale), "S" (ff_inverse[scale]) - : "%eax", "%edx" + : "eax", "edx" ); #else /* Divisions are costly everywhere; optimize the most common case. */ diff --git a/libavcodec/x86/ac3dsp_init.c b/libavcodec/x86/ac3dsp_init.c index 30a85f9..9a7fa4b 100644 --- a/libavcodec/x86/ac3dsp_init.c +++ b/libavcodec/x86/ac3dsp_init.c @@ -109,8 +109,8 @@ void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input, "r"(samples[2] + len), \ "r"(samples[3] + len), \ "r"(samples[4] + len) \ - : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \ - "%xmm4", "%xmm5", "%xmm6", "%xmm7",) \ + : XMM_CLOBBERS("xmm0", "xmm1", "xmm2", "xmm3", \ + "xmm4", "xmm5", "xmm6", "xmm7",) \ "memory" \ ); diff --git a/libavcodec/x86/cabac.h b/libavcodec/x86/cabac.h index 3510336..e574b76 100644 --- a/libavcodec/x86/cabac.h +++ b/libavcodec/x86/cabac.h @@ -203,7 +203,7 @@ static av_always_inline int get_cabac_inline_x86(CABACContext *c, "i"(offsetof(CABACContext, bytestream_end)) TABLES_ARG ,"1"(c->low), "2"(c->range) - : "%"REG_c, "memory" + : REG_c, "memory" ); return bit & 1; } @@ -251,7 +251,7 @@ static av_always_inline int get_cabac_bypass_sign_x86(CABACContext *c, int val) "i"(offsetof(CABACContext, bytestream)), "i"(offsetof(CABACContext, bytestream_end)), "i"(offsetof(CABACContext, range)) - : "%eax", "%edx", "memory" + : "eax", "edx", "memory" ); return val; } @@ -291,7 +291,7 @@ static av_always_inline int get_cabac_bypass_x86(CABACContext *c) "i"(offsetof(CABACContext, bytestream)), "i"(offsetof(CABACContext, bytestream_end)), "i"(offsetof(CABACContext, range)) - : "%eax", "%ecx", "memory" + : "eax", "ecx", "memory" ); return res; } diff --git a/libavcodec/x86/fdct.c b/libavcodec/x86/fdct.c index 112566d..6ca7339 100644 --- a/libavcodec/x86/fdct.c +++ b/libavcodec/x86/fdct.c @@ -435,8 +435,8 @@ static av_always_inline void fdct_row_sse2(const int16_t *in, int16_t *out) : : "r" (in), "r" (tab_frw_01234567_sse2.tab_frw_01234567_sse2), "r" (fdct_r_row_sse2.fdct_r_row_sse2), "i" (SHIFT_FRW_ROW), "r" (out) - XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3", - "%xmm4", "%xmm5", "%xmm6", "%xmm7") + XMM_CLOBBERS_ONLY("xmm0", "xmm1", "xmm2", "xmm3", + "xmm4", "xmm5", "xmm6", "xmm7") ); } diff --git a/libavcodec/x86/h264_i386.h b/libavcodec/x86/h264_i386.h index 
49ad0e0..f1f0c45 100644 --- a/libavcodec/x86/h264_i386.h +++ b/libavcodec/x86/h264_i386.h @@ -116,7 +116,7 @@ static int decode_significance_x86(CABACContext *c, int max_coeff, "i"(offsetof(CABACContext, bytestream)), "i"(offsetof(CABACContext, bytestream_end)) TABLES_ARG - : "%"REG_c, "memory" + : REG_c, "memory" ); return coeff_count; } @@ -202,7 +202,7 @@ static int decode_significance_8x8_x86(CABACContext *c, "i"(offsetof(CABACContext, bytestream)), "i"(offsetof(CABACContext, bytestream_end)), "i"(H264_LAST_COEFF_FLAG_OFFSET_8x8_OFFSET) TABLES_ARG - : "%"REG_c, "memory" + : REG_c, "memory" ); return coeff_count; } diff --git a/libavcodec/x86/lpc.c b/libavcodec/x86/lpc.c index 3a9493f..0c78d2c 100644 --- a/libavcodec/x86/lpc.c +++ b/libavcodec/x86/lpc.c @@ -73,8 +73,8 @@ static void lpc_apply_welch_window_sse2(const int32_t *data, int len, :"+&r"(i), "+&r"(j) :"r"(w_data+n2), "r"(data+n2), "m"(c), "r"(len) NAMED_CONSTRAINTS_ARRAY_ADD(pd_1,pd_2) - XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3", - "%xmm5", "%xmm6", "%xmm7") + XMM_CLOBBERS_ONLY("xmm0", "xmm1", "xmm2", "xmm3", + "xmm5", "xmm6", "xmm7") ); #undef WELCH } diff --git a/libavcodec/x86/me_cmp_init.c b/libavcodec/x86/me_cmp_init.c index 255df50..ef2aad2 100644 --- a/libavcodec/x86/me_cmp_init.c +++ b/libavcodec/x86/me_cmp_init.c @@ -183,7 +183,7 @@ static int vsad_intra16_mmx(MpegEncContext *v, uint8_t *pix, uint8_t *dummy, "movd %%mm0, %1\n" : "+r" (pix), "=r" (tmp) : "r" (stride), "m" (h) - : "%ecx"); + : "ecx"); return tmp & 0xFFFF; } @@ -264,7 +264,7 @@ static int vsad16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, "movd %%mm0, %2\n" : "+r" (pix1), "+r" (pix2), "=r" (tmp) : "r" (stride), "m" (h) - : "%ecx"); + : "ecx"); return tmp & 0x7FFF; } diff --git a/libavcodec/x86/mpegvideo.c b/libavcodec/x86/mpegvideo.c index b0028ce..8fa3d7f 100644 --- a/libavcodec/x86/mpegvideo.c +++ b/libavcodec/x86/mpegvideo.c @@ -228,7 +228,7 @@ __asm__ volatile( "add $16, %%"REG_a" \n\t" "js 1b \n\t" ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs) - : "%"REG_a, "memory" + : REG_a, "memory" ); block[0]= block0; } @@ -295,7 +295,7 @@ __asm__ volatile( "add $16, %%"REG_a" \n\t" "js 1b \n\t" ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs) - : "%"REG_a, "memory" + : REG_a, "memory" ); } @@ -359,7 +359,7 @@ __asm__ volatile( "add $16, %%"REG_a" \n\t" "jng 1b \n\t" ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs) - : "%"REG_a, "memory" + : REG_a, "memory" ); block[0]= block0; //Note, we do not do mismatch control for intra as errors cannot accumulate @@ -438,7 +438,7 @@ __asm__ volatile( "movd %%mm0, 124(%0, %3) \n\t" ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "r" (-2*nCoeffs) - : "%"REG_a, "memory" + : REG_a, "memory" ); } diff --git a/libavcodec/x86/mpegvideoenc.c b/libavcodec/x86/mpegvideoenc.c index b410511..1bf01f5 100644 --- a/libavcodec/x86/mpegvideoenc.c +++ b/libavcodec/x86/mpegvideoenc.c @@ -191,8 +191,8 @@ static void denoise_dct_sse2(MpegEncContext *s, int16_t *block){ " jb 1b \n\t" : "+r" (block), "+r" (sum), "+r" (offset) : "r"(block+64) - XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3", - "%xmm4", "%xmm5", "%xmm6", "%xmm7") + XMM_CLOBBERS_ONLY("xmm0", "xmm1", "xmm2", "xmm3", + "xmm4", "xmm5", "xmm6", "xmm7") ); } #endif /* HAVE_INLINE_ASM */ diff --git a/libavcodec/x86/mpegvideoenc_template.c b/libavcodec/x86/mpegvideoenc_template.c index 1899ba2..bfddeff 100644 --- 
a/libavcodec/x86/mpegvideoenc_template.c +++ b/libavcodec/x86/mpegvideoenc_template.c @@ -179,8 +179,8 @@ static int RENAME(dct_quantize)(MpegEncContext *s, : "+a" (last_non_zero_p1) : "r" (block+64), "r" (qmat), "r" (bias), "r" (inv_zigzag_direct16 + 64), "r" (temp_block + 64) - XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3", - "%xmm4", "%xmm5", "%xmm6", "%xmm7") + XMM_CLOBBERS_ONLY("xmm0", "xmm1", "xmm2", "xmm3", + "xmm4", "xmm5", "xmm6", "xmm7") ); }else{ // FMT_H263 __asm__ volatile( @@ -213,8 +213,8 @@ static int RENAME(dct_quantize)(MpegEncContext *s, : "+a" (last_non_zero_p1) : "r" (block+64), "r" (qmat+64), "r" (bias+64), "r" (inv_zigzag_direct16 + 64), "r" (temp_block + 64) - XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3", - "%xmm4", "%xmm5", "%xmm6", "%xmm7") + XMM_CLOBBERS_ONLY("xmm0", "xmm1", "xmm2", "xmm3", + "xmm4", "xmm5", "xmm6", "xmm7") ); } __asm__ volatile( diff --git a/libavcodec/x86/simple_idct.c b/libavcodec/x86/simple_idct.c index 1d46212..66a7994 100644 --- a/libavcodec/x86/simple_idct.c +++ b/libavcodec/x86/simple_idct.c @@ -1148,7 +1148,7 @@ Temp "9: \n\t" :: "r" (block), "r" (temp), "r" (coeffs) NAMED_CONSTRAINTS_ADD(wm1010,d40000) - : "%eax" + : "eax" ); } diff --git a/libavcodec/x86/snowdsp.c b/libavcodec/x86/snowdsp.c index 2778489..ae793cc 100644 --- a/libavcodec/x86/snowdsp.c +++ b/libavcodec/x86/snowdsp.c @@ -671,8 +671,8 @@ static void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\ :\ "rm"((x86_reg)(src_x<<1)),"m"(obmc),"a"(block),"m"(b_h),"m"(src_stride):\ - XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", )\ - "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d""); + XMM_CLOBBERS("xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", )\ + REG_c,REG_S,REG_D,REG_d); #define snow_inner_add_yblock_sse2_end_8\ "sal $1, %%"REG_c" \n\t"\ @@ -820,7 +820,7 @@ snow_inner_add_yblock_sse2_end_16 :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\ :\ "rm"((x86_reg)(src_x<<1)),"m"(obmc),"a"(block),"m"(b_h),"m"(src_stride):\ - "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d""); + REG_c,REG_S,REG_D,REG_d); static void inner_add_yblock_bw_8_obmc_16_mmx(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h, int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){ diff --git a/libavcodec/x86/vc1dsp_mmx.c b/libavcodec/x86/vc1dsp_mmx.c index 77a8e35..4d1361d 100644 --- a/libavcodec/x86/vc1dsp_mmx.c +++ b/libavcodec/x86/vc1dsp_mmx.c @@ -111,7 +111,7 @@ static void vc1_put_ver_16b_shift2_mmx(int16_t *dst, : "r"(stride), "r"(-2*stride), "m"(shift), "m"(rnd), "r"(9*stride-4) NAMED_CONSTRAINTS_ADD(ff_pw_9) - : "%"REG_c, "memory" + : REG_c, "memory" ); } @@ -215,7 +215,7 @@ static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\ : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),\ "g"(stride-offset)\ NAMED_CONSTRAINTS_ADD(ff_pw_9)\ - : "%"REG_c, "memory"\ + : REG_c, "memory"\ );\ } diff --git a/libavcodec/x86/xvididct_sse2.c b/libavcodec/x86/xvididct_sse2.c index 51a5d9d..c19dd82 100644 --- a/libavcodec/x86/xvididct_sse2.c +++ b/libavcodec/x86/xvididct_sse2.c @@ -382,13 +382,13 @@ av_extern_inline void ff_xvid_idct_sse2(short *block) "6: \n\t" : "+r" (block) : NAMED_CONSTRAINTS_ARRAY(m127,iTab1,walkenIdctRounders,iTab2,iTab3,iTab4,tan3,tan1,tan2,sqrt2) - : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", - "%xmm4", "%xmm5", "%xmm6", "%xmm7", ) + : XMM_CLOBBERS("xmm0", "xmm1", "xmm2", "xmm3", + "xmm4", "xmm5", 
"xmm6", "xmm7", ) #if ARCH_X86_64 - XMM_CLOBBERS("%xmm8", "%xmm9", "%xmm10", "%xmm11", - "%xmm12", "%xmm13", "%xmm14", ) + XMM_CLOBBERS("xmm8", "xmm9", "xmm10", "xmm11", + "xmm12", "xmm13", "xmm14", ) #endif - "%eax", "%ecx", "%edx", "%esi", "memory"); + "eax", "ecx", "edx", "esi", "memory"); } void ff_xvid_idct_sse2_put(uint8_t *dest, int line_size, short *block) diff --git a/libavfilter/libmpcodecs/vf_eq.c b/libavfilter/libmpcodecs/vf_eq.c index 7be1674..ab4fa22 100644 --- a/libavfilter/libmpcodecs/vf_eq.c +++ b/libavfilter/libmpcodecs/vf_eq.c @@ -80,7 +80,7 @@ static void process_MMX(unsigned char *dest, int dstride, unsigned char *src, in "jnz 1b \n\t" : "=r" (src), "=r" (dest) : "0" (src), "1" (dest), "r" (w>>3), "r" (brvec), "r" (contvec) - : "%eax" + : "eax" ); for (i = w&7; i; i--) diff --git a/libavfilter/libmpcodecs/vf_eq2.c b/libavfilter/libmpcodecs/vf_eq2.c index d0a2b92..ecd6324 100644 --- a/libavfilter/libmpcodecs/vf_eq2.c +++ b/libavfilter/libmpcodecs/vf_eq2.c @@ -169,7 +169,7 @@ void affine_1d_MMX (eq2_param_t *par, unsigned char *dst, unsigned char *src, "jnz 1b \n\t" : "=r" (src), "=r" (dst) : "0" (src), "1" (dst), "r" (w >> 3), "r" (brvec), "r" (contvec) - : "%eax" + : "eax" ); for (i = w & 7; i > 0; i--) { diff --git a/libavfilter/x86/vf_noise.c b/libavfilter/x86/vf_noise.c index 0a86cb0..7358bfc 100644 --- a/libavfilter/x86/vf_noise.c +++ b/libavfilter/x86/vf_noise.c @@ -47,7 +47,7 @@ static void line_noise_mmx(uint8_t *dst, const uint8_t *src, "add $8, %%"REG_a" \n\t" " js 1b \n\t" :: "r" (src+mmx_len), "r" (noise+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len) - : "%"REG_a + : REG_a ); if (mmx_len != len) ff_line_noise_c(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0); @@ -87,7 +87,7 @@ static void line_noise_avg_mmx(uint8_t *dst, const uint8_t *src, " js 1b \n\t" :: "r" (src+mmx_len), "r" (shift[0]+mmx_len), "r" (shift[1]+mmx_len), "r" (shift[2]+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len) - : "%"REG_a + : REG_a ); if (mmx_len != len){ @@ -119,7 +119,7 @@ static void line_noise_mmxext(uint8_t *dst, const uint8_t *src, "add $8, %%"REG_a" \n\t" " js 1b \n\t" :: "r" (src+mmx_len), "r" (noise+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len) - : "%"REG_a + : REG_a ); if (mmx_len != len) ff_line_noise_c(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0); diff --git a/libpostproc/postprocess_template.c b/libpostproc/postprocess_template.c index 6722f96..83fe815 100644 --- a/libpostproc/postprocess_template.c +++ b/libpostproc/postprocess_template.c @@ -207,7 +207,7 @@ static inline int RENAME(vertClassify)(const uint8_t src[], int stride, PPContex : "=r" (numEq), "=r" (dcOk) : "r" (src), "r" ((x86_reg)stride), "m" (c->pQPb) - : "%"REG_a + : REG_a ); numEq= (-numEq) &0xFF; @@ -352,7 +352,7 @@ static inline void RENAME(doVertLowPass)(uint8_t *src, int stride, PPContext *c) : : "r" (src), "r" ((x86_reg)stride), "m" (c->pQPb) - : "%"REG_a, "%"REG_c + : REG_a, REG_c ); #else //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW const int l1= stride; @@ -491,7 +491,7 @@ static inline void RENAME(vertX1Filter)(uint8_t *src, int stride, PPContext *co) : : "r" (src), "r" ((x86_reg)stride), "m" (co->pQPb) NAMED_CONSTRAINTS_ADD(b01) - : "%"REG_a, "%"REG_c + : REG_a, REG_c ); #else //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW @@ -757,7 +757,7 @@ static inline void RENAME(doVertDefFilter)(uint8_t src[], int stride, PPContext : : "r" (src), "r" ((x86_reg)stride), "m" (c->pQPb) NAMED_CONSTRAINTS_ADD(b80,b00,b01) - : "%"REG_a, "%"REG_c + : REG_a, REG_c ); /* @@ -1045,7 +1045,7 @@ static inline void 
RENAME(doVertDefFilter)(uint8_t src[], int stride, PPContext : "+r" (src) : "r" ((x86_reg)stride), "m" (c->pQPb), "r"(tmp) NAMED_CONSTRAINTS_ADD(w05,w20) - : "%"REG_a + : REG_a ); #else //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW const int l1= stride; @@ -1317,7 +1317,7 @@ DERING_CORE((%0, %1, 8) ,(%%REGd, %1, 4),%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1, "1: \n\t" : : "r" (src), "r" ((x86_reg)stride), "m" (c->pQPb), "m"(c->pQPb2), "q"(tmp) NAMED_CONSTRAINTS_ADD(deringThreshold,b00,b02,b08) - : "%"REG_a, "%"REG_d, "%"REG_SP + : REG_a, REG_d, REG_SP ); #else // HAVE_7REGS && (TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW) int y; @@ -1472,7 +1472,7 @@ static inline void RENAME(deInterlaceInterpolateLinear)(uint8_t src[], int strid "movq %%mm1, (%%"REG_c", %1, 2) \n\t" : : "r" (src), "r" ((x86_reg)stride) - : "%"REG_a, "%"REG_c + : REG_a, REG_c ); #else int a, b, x; @@ -1562,9 +1562,9 @@ DEINT_CUBIC((%%REGd, %1), (%0, %1, 8) , (%%REGd, %1, 4), (%%REGc) , (%%REGc, : : "r" (src), "r" ((x86_reg)stride) : #if TEMPLATE_PP_SSE2 - XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm7",) + XMM_CLOBBERS("xmm0", "xmm1", "xmm2", "xmm3", "xmm7",) #endif - "%"REG_a, "%"REG_d, "%"REG_c + REG_a, REG_d, REG_c ); #undef REAL_DEINT_CUBIC #else //TEMPLATE_PP_SSE2 || TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW @@ -1636,7 +1636,7 @@ DEINT_FF((%%REGd, %1), (%%REGd, %1, 2), (%0, %1, 8) , (%%REGd, %1, 4)) "movq %%mm0, (%2) \n\t" : : "r" (src), "r" ((x86_reg)stride), "r"(tmp) - : "%"REG_a, "%"REG_d + : REG_a, REG_d ); #else //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW int x; @@ -1726,7 +1726,7 @@ DEINT_L5(%%mm1, %%mm0, (%%REGd, %1, 2), (%0, %1, 8) , (%%REGd, %1, 4)) "movq %%mm0, (%2) \n\t" "movq %%mm1, (%3) \n\t" : : "r" (src), "r" ((x86_reg)stride), "r"(tmp), "r"(tmp2) - : "%"REG_a, "%"REG_d + : REG_a, REG_d ); #else //(TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW) && HAVE_6REGS int x; @@ -1814,7 +1814,7 @@ static inline void RENAME(deInterlaceBlendLinear)(uint8_t src[], int stride, uin "movq %%mm1, (%2) \n\t" : : "r" (src), "r" ((x86_reg)stride), "r" (tmp) - : "%"REG_a, "%"REG_d + : REG_a, REG_d ); #else //TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW int a, b, c, x; @@ -1918,7 +1918,7 @@ static inline void RENAME(deInterlaceMedian)(uint8_t src[], int stride) : : "r" (src), "r" ((x86_reg)stride) - : "%"REG_a, "%"REG_d + : REG_a, REG_d ); #else // MMX without MMX2 @@ -1960,7 +1960,7 @@ MEDIAN((%0, %1, 4) , (%%REGd) , (%%REGd, %1)) MEDIAN((%%REGd, %1), (%%REGd, %1, 2), (%0, %1, 8)) : : "r" (src), "r" ((x86_reg)stride) - : "%"REG_a, "%"REG_d + : REG_a, REG_d ); #endif //TEMPLATE_PP_MMXEXT #else //TEMPLATE_PP_MMX @@ -2067,7 +2067,7 @@ static inline void RENAME(transpose1)(uint8_t *dst1, uint8_t *dst2, const uint8_ :: "r" (src), "r" ((x86_reg)srcStride), "r" (dst1), "r" (dst2) - : "%"REG_a + : REG_a ); } @@ -2147,7 +2147,7 @@ static inline void RENAME(transpose2)(uint8_t *dst, int dstStride, const uint8_t "movd %%mm1, 4(%%"REG_d", %1, 2) \n\t" :: "r" (dst), "r" ((x86_reg)dstStride), "r" (src) - : "%"REG_a, "%"REG_d + : REG_a, REG_d ); } #endif //TEMPLATE_PP_MMX @@ -2451,7 +2451,7 @@ L2_DIFF_CORE((%0, %%REGc) , (%1, %%REGc)) :: "r" (src), "r" (tempBlurred), "r"((x86_reg)stride), "m" (tempBlurredPast) NAMED_CONSTRAINTS_ADD(b80) - : "%"REG_a, "%"REG_d, "%"REG_c, "memory" + : REG_a, REG_d, REG_c, "memory" ); #else //(TEMPLATE_PP_MMXEXT || TEMPLATE_PP_3DNOW) && HAVE_6REGS { @@ -2651,7 +2651,7 @@ static av_always_inline void RENAME(do_a_deblock)(uint8_t *src, int step, int st : "=m" (eq_mask), "=m" (dc_mask) : "r" (src), "r" ((x86_reg)step), "m" 
(c->pQPb), "m"(c->ppMode.flatnessThreshold) - : "%"REG_a + : REG_a ); both_masks = dc_mask & eq_mask; @@ -3068,7 +3068,7 @@ static av_always_inline void RENAME(do_a_deblock)(uint8_t *src, int step, int st : "+r" (temp_src) : "r" ((x86_reg)step), "m" (c->pQPb), "m"(eq_mask), "r"(tmp) NAMED_CONSTRAINTS_ADD(w05,w20) - : "%"REG_a + : REG_a ); } /*if(step==16){ @@ -3172,7 +3172,7 @@ SCALED_CPY((%%REGa, %4), (%%REGa, %4, 2), (%%REGd, %5), (%%REGd, %5, 2)) "r"(dst), "r" ((x86_reg)srcStride), "r" ((x86_reg)dstStride) - : "%"REG_d + : REG_d ); #else //TEMPLATE_PP_MMX && HAVE_6REGS for(i=0; i<8; i++) @@ -3205,7 +3205,7 @@ SIMPLE_CPY((%%REGa, %2), (%%REGa, %2, 2), (%%REGd, %3), (%%REGd, %3, 2)) "r" (dst), "r" ((x86_reg)srcStride), "r" ((x86_reg)dstStride) - : "%"REG_a, "%"REG_d + : REG_a, REG_d ); #else //TEMPLATE_PP_MMX && HAVE_6REGS for(i=0; i<8; i++) @@ -3393,7 +3393,7 @@ static void RENAME(postProcess)(const uint8_t src[], int srcStride, uint8_t dst[ "prefetcht0 32(%%"REG_d", %2) \n\t" :: "r" (srcBlock), "r" ((x86_reg)srcStride), "r" (dstBlock), "r" ((x86_reg)dstStride), "g" ((x86_reg)x), "g" ((x86_reg)copyAhead) - : "%"REG_a, "%"REG_d + : REG_a, REG_d ); #elif TEMPLATE_PP_3DNOW @@ -3529,7 +3529,7 @@ static void RENAME(postProcess)(const uint8_t src[], int srcStride, uint8_t dst[ "prefetcht0 32(%%"REG_d", %2) \n\t" :: "r" (srcBlock), "r" ((x86_reg)srcStride), "r" (dstBlock), "r" ((x86_reg)dstStride), "g" ((x86_reg)x), "g" ((x86_reg)copyAhead) - : "%"REG_a, "%"REG_d + : REG_a, REG_d ); #elif TEMPLATE_PP_3DNOW diff --git a/libswscale/x86/hscale_fast_bilinear_simd.c b/libswscale/x86/hscale_fast_bilinear_simd.c index 103793d..98bd11f 100644 --- a/libswscale/x86/hscale_fast_bilinear_simd.c +++ b/libswscale/x86/hscale_fast_bilinear_simd.c @@ -276,9 +276,9 @@ void ff_hyscale_fast_mmxext(SwsContext *c, int16_t *dst, #if ARCH_X86_64 ,"m"(retsave) #endif - : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D + : REG_a, REG_c, REG_d, REG_S, REG_D #if !defined(PIC) - ,"%"REG_b + ,REG_b #endif ); @@ -360,9 +360,9 @@ void ff_hcscale_fast_mmxext(SwsContext *c, int16_t *dst1, int16_t *dst2, #if ARCH_X86_64 ,"m"(retsave) #endif - : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D + : REG_a, REG_c, REG_d, REG_S, REG_D #if !defined(PIC) - ,"%"REG_b + ,REG_b #endif ); diff --git a/libswscale/x86/rgb2rgb_template.c b/libswscale/x86/rgb2rgb_template.c index e71c7eb..9263068 100644 --- a/libswscale/x86/rgb2rgb_template.c +++ b/libswscale/x86/rgb2rgb_template.c @@ -1203,7 +1203,7 @@ static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *u "cmp %4, %%"REG_a" \n\t" " jb 1b \n\t" ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth) - : "%"REG_a + : REG_a ); if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) { usrc += chromStride; @@ -1268,7 +1268,7 @@ static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *u "cmp %4, %%"REG_a" \n\t" " jb 1b \n\t" ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth) - : "%"REG_a + : REG_a ); if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) { usrc += chromStride; @@ -1374,7 +1374,7 @@ static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t "cmp %4, %%"REG_a" \n\t" " jb 1b \n\t" ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth) - : "memory", "%"REG_a + : "memory", REG_a ); ydst += lumStride; @@ -1404,7 +1404,7 @@ static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t " jb 1b \n\t" ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth) - : "memory", "%"REG_a 
+ : "memory", REG_a ); udst += chromStride; vdst += chromStride; @@ -1479,7 +1479,7 @@ static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, int srcWid "r" (dst + mmxSize*2), "r" (dst + dstStride + mmxSize*2), "g" (-mmxSize) NAMED_CONSTRAINTS_ADD(mmx_ff) - : "%"REG_a + : REG_a ); for (x=mmxSize-1; x<srcWidth-1; x++) { @@ -1573,7 +1573,7 @@ static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t "cmp %4, %%"REG_a" \n\t" " jb 1b \n\t" ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth) - : "memory", "%"REG_a + : "memory", REG_a ); ydst += lumStride; @@ -1603,7 +1603,7 @@ static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t " jb 1b \n\t" ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth) - : "memory", "%"REG_a + : "memory", REG_a ); udst += chromStride; vdst += chromStride; @@ -1711,7 +1711,7 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_ " js 1b \n\t" : : "r" (src+width*3), "r" (ydst+width), "g" ((x86_reg)-width), "r"(rgb2yuv) NAMED_CONSTRAINTS_ADD(ff_w1111,ff_bgr2YOffset) - : "%"REG_a, "%"REG_d + : REG_a, REG_d ); ydst += lumStride; src += srcStride; @@ -1860,7 +1860,7 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_ " js 1b \n\t" : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" (-chromWidth), "r"(rgb2yuv) NAMED_CONSTRAINTS_ADD(ff_w1111,ff_bgr2UVOffset) - : "%"REG_a, "%"REG_d + : REG_a, REG_d ); udst += chromStride; @@ -1905,7 +1905,7 @@ static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, ui "cmp %3, %%"REG_a" \n\t" " jb 1b \n\t" ::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15) - : "memory", "%"REG_a"" + : "memory", REG_a ); #else __asm__( @@ -1931,7 +1931,7 @@ static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, ui "cmp %3, %%"REG_a" \n\t" " jb 1b \n\t" ::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15) - : "memory", "%"REG_a + : "memory", REG_a ); #endif for (w= (width&(~15)); w < width; w++) { diff --git a/libswscale/x86/swscale.c b/libswscale/x86/swscale.c index fe5c4ea..416b533 100644 --- a/libswscale/x86/swscale.c +++ b/libswscale/x86/swscale.c @@ -257,8 +257,8 @@ static void yuv2yuvX_sse3(const int16_t *filter, int filterSize, :: "g" (filter), "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" (offset), "m"(filterSize), "m"(((uint64_t *) dither)[0]) - : XMM_CLOBBERS("%xmm0" , "%xmm1" , "%xmm2" , "%xmm3" , "%xmm4" , "%xmm5" , "%xmm7" ,) - "%"REG_d, "%"REG_S, "%"REG_c + : XMM_CLOBBERS("xmm0" , "xmm1" , "xmm2" , "xmm3" , "xmm4" , "xmm5" , "xmm7" ,) + REG_d, REG_S, REG_c ); } else { __asm__ volatile( @@ -267,8 +267,8 @@ static void yuv2yuvX_sse3(const int16_t *filter, int filterSize, :: "g" (filter), "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" (offset), "m"(filterSize), "m"(((uint64_t *) dither)[0]) - : XMM_CLOBBERS("%xmm0" , "%xmm1" , "%xmm2" , "%xmm3" , "%xmm4" , "%xmm5" , "%xmm7" ,) - "%"REG_d, "%"REG_S, "%"REG_c + : XMM_CLOBBERS("xmm0" , "xmm1" , "xmm2" , "xmm3" , "xmm4" , "xmm5" , "xmm7" ,) + REG_d, REG_S, REG_c ); } } diff --git a/libswscale/x86/swscale_template.c b/libswscale/x86/swscale_template.c index 36a606c..278655c 100644 --- a/libswscale/x86/swscale_template.c +++ b/libswscale/x86/swscale_template.c @@ -116,7 +116,7 @@ static void RENAME(yuv2yuvX)(const int16_t *filter, int filterSize, "jb 1b \n\t"\ :: "g" (filter), "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" 
(offset) - : "%"REG_d, "%"REG_S, "%"REG_c + : REG_d, REG_S, REG_c ); } @@ -173,7 +173,7 @@ static void RENAME(yuv2yuvX)(const int16_t *filter, int filterSize, "m" (dummy), "m" (dummy), "m" (dummy),\ "r" (dest), "m" (dstW_reg), "m"(uv_off) \ NAMED_CONSTRAINTS_ADD(bF8,bFC) \ - : "%"REG_a, "%"REG_d, "%"REG_S \ + : REG_a, REG_d, REG_S \ ); #define YSCALEYUV2PACKEDX_ACCURATE_UV \ @@ -683,7 +683,7 @@ static void RENAME(yuv2bgr24_X_ar)(SwsContext *c, const int16_t *lumFilter, "m" (dummy), "m" (dummy), "m" (dummy), "r" (dest), "m" (dstW_reg), "m"(uv_off) NAMED_CONSTRAINTS_ADD(ff_M24A,ff_M24C,ff_M24B) - : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S + : REG_a, REG_c, REG_d, REG_S ); } @@ -708,7 +708,7 @@ static void RENAME(yuv2bgr24_X)(SwsContext *c, const int16_t *lumFilter, "m" (dummy), "m" (dummy), "m" (dummy), "r" (dest), "m" (dstW_reg), "m"(uv_off) NAMED_CONSTRAINTS_ADD(ff_M24A,ff_M24C,ff_M24B) - : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S + : REG_a, REG_c, REG_d, REG_S ); } #endif /* HAVE_6REGS */ @@ -873,7 +873,7 @@ static void RENAME(yuv2rgb32_2)(SwsContext *c, const int16_t *buf[2], :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "r" (dest), "a" (&c->redDither), "r" (abuf0), "r" (abuf1) - : "%r8" + : "r8" ); #else c->u_temp=(intptr_t)abuf0; -- 2.1.4 _______________________________________________ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org http://ffmpeg.org/mailman/listinfo/ffmpeg-devel