diff --git a/libavcodec/aarch64/h264pred_init.c b/libavcodec/aarch64/h264pred_init.c
index 0ae8f70d23..9819f85f7d 100644
--- a/libavcodec/aarch64/h264pred_init.c
+++ b/libavcodec/aarch64/h264pred_init.c
@@ -25,6 +25,19 @@
 #include "libavcodec/avcodec.h"
 #include "libavcodec/h264pred.h"
 
+/* PERFORMANCE WARNING:
+ * These assembly optimizations have been identified as performance regressions:
+ * with modern CPU micro-architectures and current compiler optimizations,
+ * the C implementations now consistently outperform these handwritten routines.
+ *
+ * They are kept here for historical reference.
+ *
+ * New optimizations are welcome: if you can provide an optimized
+ * implementation that demonstrably beats the current C version in rigorous
+ * benchmarks, please submit a patch.
+ */
+#define ENABLE_INEFFICIENT_ASM 0
+
 void ff_pred16x16_vert_neon(uint8_t *src, ptrdiff_t stride);
 void ff_pred16x16_hor_neon(uint8_t *src, ptrdiff_t stride);
 void ff_pred16x16_plane_neon(uint8_t *src, ptrdiff_t stride);
@@ -69,16 +82,22 @@ static av_cold void h264_pred_init_neon(H264PredContext *h, int codec_id,
 {
     if (bit_depth == 8) {
         if (chroma_format_idc <= 1) {
+#if ENABLE_INEFFICIENT_ASM
             h->pred8x8[VERT_PRED8x8   ] = ff_pred8x8_vert_neon;
             h->pred8x8[HOR_PRED8x8    ] = ff_pred8x8_hor_neon;
+#endif
             if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
                 h->pred8x8[PLANE_PRED8x8] = ff_pred8x8_plane_neon;
+#if ENABLE_INEFFICIENT_ASM
             h->pred8x8[DC_128_PRED8x8 ] = ff_pred8x8_128_dc_neon;
+#endif
             if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 &&
                 codec_id != AV_CODEC_ID_VP8) {
+#if ENABLE_INEFFICIENT_ASM
                 h->pred8x8[DC_PRED8x8     ] = ff_pred8x8_dc_neon;
                 h->pred8x8[LEFT_DC_PRED8x8] = ff_pred8x8_left_dc_neon;
                 h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_neon;
+#endif
                 h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8] = ff_pred8x8_l0t_dc_neon;
                 h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8] = ff_pred8x8_0lt_dc_neon;
                 h->pred8x8[ALZHEIMER_DC_L00_PRED8x8] = ff_pred8x8_l00_dc_neon;
@@ -86,27 +105,37 @@ static av_cold void h264_pred_init_neon(H264PredContext *h, int codec_id,
             }
         }
 
+#if ENABLE_INEFFICIENT_ASM
         h->pred16x16[DC_PRED8x8     ] = ff_pred16x16_dc_neon;
+#endif
         h->pred16x16[VERT_PRED8x8   ] = ff_pred16x16_vert_neon;
         h->pred16x16[HOR_PRED8x8    ] = ff_pred16x16_hor_neon;
+#if ENABLE_INEFFICIENT_ASM
         h->pred16x16[LEFT_DC_PRED8x8] = ff_pred16x16_left_dc_neon;
         h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_neon;
         h->pred16x16[DC_128_PRED8x8 ] = ff_pred16x16_128_dc_neon;
+#endif
         if (codec_id != AV_CODEC_ID_SVQ3 && codec_id != AV_CODEC_ID_RV40 &&
             codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
             h->pred16x16[PLANE_PRED8x8  ] = ff_pred16x16_plane_neon;
     }
     if (bit_depth == 10) {
         if (chroma_format_idc <= 1) {
+#if ENABLE_INEFFICIENT_ASM
             h->pred8x8[VERT_PRED8x8   ] = ff_pred8x8_vert_neon_10;
+#endif
             h->pred8x8[HOR_PRED8x8    ] = ff_pred8x8_hor_neon_10;
             if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
                 h->pred8x8[PLANE_PRED8x8] = ff_pred8x8_plane_neon_10;
+#if ENABLE_INEFFICIENT_ASM
             h->pred8x8[DC_128_PRED8x8 ] = ff_pred8x8_128_dc_neon_10;
+#endif
             if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 &&
                 codec_id != AV_CODEC_ID_VP8) {
+#if ENABLE_INEFFICIENT_ASM
                 h->pred8x8[DC_PRED8x8     ] = ff_pred8x8_dc_neon_10;
                 h->pred8x8[LEFT_DC_PRED8x8] = ff_pred8x8_left_dc_neon_10;
+#endif
                 h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_neon_10;
                 h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8] = ff_pred8x8_l0t_dc_neon_10;
                 h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8] = ff_pred8x8_0lt_dc_neon_10;
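
Note on the kill switch: as written, ENABLE_INEFFICIENT_ASM is hard-coded to 0, so anyone
re-running the comparison has to edit the file. A hypothetical variant (not part of this
patch) would define the default only when the build line has not already supplied a value,
so the assembly paths could be re-enabled with -DENABLE_INEFFICIENT_ASM=1:

    /* Hypothetical variant, not in this patch: keep 0 as the default but
     * let the build line override it, e.g. -DENABLE_INEFFICIENT_ASM=1. */
    #ifndef ENABLE_INEFFICIENT_ASM
    #define ENABLE_INEFFICIENT_ASM 0
    #endif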
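
On "rigorous benchmarks": inside the FFmpeg tree, the checkasm tool (tests/checkasm/) is
the established way to verify and time SIMD routines against their C counterparts, and its
bench mode reports per-function timings (exact invocation varies by tree). For a rough
standalone measurement, a minimal harness along the following lines can time any predictor
with the ff_pred16x16_* signature. This is a sketch, not part of the patch: bench_pred and
the memset-based dummy_pred stand-in are hypothetical; to time the real routines, declare
and link the libavcodec symbols instead.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    #define STRIDE 64
    #define RUNS   1000000

    /* Stand-in with the same signature as the ff_pred16x16_* routines
     * (hypothetical; link a real predictor from libavcodec instead). */
    static void dummy_pred(uint8_t *src, ptrdiff_t stride)
    {
        for (int y = 0; y < 16; y++)
            memset(src + y * stride, 128, 16);
    }

    /* Call the predictor RUNS times and print nanoseconds per call
     * (POSIX clock_gettime; loop overhead is included). */
    static void bench_pred(const char *name,
                           void (*pred)(uint8_t *, ptrdiff_t), uint8_t *buf)
    {
        struct timespec t0, t1;

        clock_gettime(CLOCK_MONOTONIC, &t0);
        for (int i = 0; i < RUNS; i++)
            pred(buf + STRIDE + 1, STRIDE); /* row above / column left hold neighbours */
        clock_gettime(CLOCK_MONOTONIC, &t1);

        double ns = (t1.tv_sec - t0.tv_sec) * 1e9 + (double)(t1.tv_nsec - t0.tv_nsec);
        printf("%-24s %8.2f ns/call\n", name, ns / RUNS);
    }

    int main(void)
    {
        static uint8_t buf[STRIDE * 32];

        memset(buf, 64, sizeof(buf)); /* plausible neighbour pixels */
        bench_pred("dummy_pred (C stand-in)", dummy_pred, buf);
        /* e.g.: bench_pred("ff_pred16x16_dc_neon", ff_pred16x16_dc_neon, buf); */
        return 0;
    }

A wall-clock harness like this is only indicative; checkasm pins the comparison to cycle
counts and checks correctness of the assembly against the C reference at the same time.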