Test for presence of sps->vui parameters

This commit is contained in:
loki
2021-06-12 23:08:20 +02:00
parent 55ce46e6ed
commit 30f7742f51
54 changed files with 21645 additions and 3 deletions
+157
View File
@@ -0,0 +1,157 @@
/*
* copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_X86_ASM_H
#define AVUTIL_X86_ASM_H
#include <stdint.h>
/* Image of a 128-bit XMM register as two 64-bit halves, for declaring
 * 16-byte data that inline asm reads/writes as a unit. */
typedef struct xmm_reg {
uint64_t a, b;
} xmm_reg;
/* Image of a 256-bit YMM register (AVX) as four 64-bit quarters. */
typedef struct ymm_reg {
uint64_t a, b, c, d;
} ymm_reg;
#if ARCH_X86_64
#define FF_OPSIZE "q"
#define FF_REG_a "rax"
#define FF_REG_b "rbx"
#define FF_REG_c "rcx"
#define FF_REG_d "rdx"
#define FF_REG_D "rdi"
#define FF_REG_S "rsi"
#define FF_PTR_SIZE "8"
typedef int64_t x86_reg;
/* FF_REG_SP is defined in Solaris sys headers, so use FF_REG_sp */
#define FF_REG_sp "rsp"
#define FF_REG_BP "rbp"
#define FF_REGBP rbp
#define FF_REGa rax
#define FF_REGb rbx
#define FF_REGc rcx
#define FF_REGd rdx
#define FF_REGSP rsp
#elif ARCH_X86_32
#define FF_OPSIZE "l"
#define FF_REG_a "eax"
#define FF_REG_b "ebx"
#define FF_REG_c "ecx"
#define FF_REG_d "edx"
#define FF_REG_D "edi"
#define FF_REG_S "esi"
#define FF_PTR_SIZE "4"
typedef int32_t x86_reg;
#define FF_REG_sp "esp"
#define FF_REG_BP "ebp"
#define FF_REGBP ebp
#define FF_REGa eax
#define FF_REGb ebx
#define FF_REGc ecx
#define FF_REGd edx
#define FF_REGSP esp
#else
typedef int x86_reg;
#endif
/* Whether inline asm may claim 7 (resp. 6) general-purpose registers:
 * always true on x86-64; on x86-32 it depends on ebx/ebp being free
 * (they can be reserved for PIC and the frame pointer respectively). */
#define HAVE_7REGS (ARCH_X86_64 || (HAVE_EBX_AVAILABLE && HAVE_EBP_AVAILABLE))
#define HAVE_6REGS (ARCH_X86_64 || (HAVE_EBX_AVAILABLE || HAVE_EBP_AVAILABLE))
/* NOTE(review): presumably flags configurations where absolute symbol
 * relocations in inline asm do not work (PIC on x86-64) — confirm against
 * the users of BROKEN_RELOCATIONS. */
#if ARCH_X86_64 && defined(PIC)
#define BROKEN_RELOCATIONS 1
#endif
/*
 * If gcc is not set to support sse (-msse) it will not accept xmm registers
 * in the clobber list for inline asm. XMM_CLOBBERS takes a list of xmm
 * registers to be marked as clobbered and evaluates to nothing if they are
 * not supported, or to the list itself if they are supported. Since a clobber
 * list may not be empty, XMM_CLOBBERS_ONLY should be used if the xmm
 * registers are the only ones in the clobber list.
 * For example a list with "eax" and "xmm0" as clobbers should become:
 * : XMM_CLOBBERS("xmm0",) "eax"
 * and a list with only "xmm0" should become:
 * XMM_CLOBBERS_ONLY("xmm0")
 */
#if HAVE_XMM_CLOBBERS
#define XMM_CLOBBERS(...) __VA_ARGS__
/* Note the leading ':' — this emits the entire clobber section, so it
 * disappears completely when xmm clobbers are unsupported. */
#define XMM_CLOBBERS_ONLY(...) : __VA_ARGS__
#else
#define XMM_CLOBBERS(...)
#define XMM_CLOBBERS_ONLY(...)
#endif
/* Use to export labels from asm. */
#define LABEL_MANGLE(a) EXTERN_PREFIX #a
// Use rip-relative addressing if compiling PIC code on x86-64.
#if ARCH_X86_64 && defined(PIC)
#define LOCAL_MANGLE(a) #a "(%%rip)"
#else
#define LOCAL_MANGLE(a) #a
#endif
#if HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS
#define MANGLE(a) EXTERN_PREFIX LOCAL_MANGLE(a)
#define NAMED_CONSTRAINTS_ADD(...)
#define NAMED_CONSTRAINTS(...)
#define NAMED_CONSTRAINTS_ARRAY_ADD(...)
#define NAMED_CONSTRAINTS_ARRAY(...)
#else
/* When direct symbol references are used in code passed to a compiler that does not support them
* then these references need to be converted to named asm constraints instead.
* Instead of returning a direct symbol MANGLE now returns a named constraint for that specific symbol.
* In order for this to work there must also be a corresponding entry in the asm-interface. To add this
* entry use the macro NAMED_CONSTRAINTS() and pass in a list of each symbol reference used in the
* corresponding block of code. (e.g. NAMED_CONSTRAINTS(var1,var2,var3) where var1 is the first symbol etc. ).
* If there are already existing constraints then use NAMED_CONSTRAINTS_ADD to add to the existing constraint list.
*/
#define MANGLE(a) "%[" #a "]"
// Intel/MSVC does not correctly expand va-args so we need a rather ugly hack in order to get it to work
#define FE_0(P, X) P(X)
#define FE_1(P, X, X1) P(X), FE_0(P, X1)
#define FE_2(P, X, X1, X2) P(X), FE_1(P, X1, X2)
#define FE_3(P, X, X1, X2, X3) P(X), FE_2(P, X1, X2, X3)
#define FE_4(P, X, X1, X2, X3, X4) P(X), FE_3(P, X1, X2, X3, X4)
#define FE_5(P, X, X1, X2, X3, X4, X5) P(X), FE_4(P, X1, X2, X3, X4, X5)
#define FE_6(P, X, X1, X2, X3, X4, X5, X6) P(X), FE_5(P, X1, X2, X3, X4, X5, X6)
#define FE_7(P, X, X1, X2, X3, X4, X5, X6, X7) P(X), FE_6(P, X1, X2, X3, X4, X5, X6, X7)
#define FE_8(P, X, X1, X2, X3, X4, X5, X6, X7, X8) P(X), FE_7(P, X1, X2, X3, X4, X5, X6, X7, X8)
#define FE_9(P, X, X1, X2, X3, X4, X5, X6, X7, X8, X9) P(X), FE_8(P, X1, X2, X3, X4, X5, X6, X7, X8, X9)
#define GET_FE_IMPL(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, NAME, ...) NAME
#define GET_FE(A) GET_FE_IMPL A
#define GET_FE_GLUE(x, y) x y
#define FOR_EACH_VA(P, ...) GET_FE_GLUE(GET_FE((__VA_ARGS__, FE_9, FE_8, FE_7, FE_6, FE_5, FE_4, FE_3, FE_2, FE_1, FE_0)), (P, __VA_ARGS__))
#define NAME_CONSTRAINT(x) [x] "m"(x)
// Parameters are a list of each symbol reference required
#define NAMED_CONSTRAINTS_ADD(...) , FOR_EACH_VA(NAME_CONSTRAINT, __VA_ARGS__)
// Same but without comma for when there are no previously defined constraints
#define NAMED_CONSTRAINTS(...) FOR_EACH_VA(NAME_CONSTRAINT, __VA_ARGS__)
// Same as above NAMED_CONSTRAINTS except used for passing arrays/pointers instead of normal variables
#define NAME_CONSTRAINT_ARRAY(x) [x] "m"(*x)
#define NAMED_CONSTRAINTS_ARRAY_ADD(...) , FOR_EACH_VA(NAME_CONSTRAINT_ARRAY, __VA_ARGS__)
#define NAMED_CONSTRAINTS_ARRAY(...) FOR_EACH_VA(NAME_CONSTRAINT_ARRAY, __VA_ARGS__)
#endif
#endif /* AVUTIL_X86_ASM_H */
+145
View File
@@ -0,0 +1,145 @@
/*
* Copyright (c) 2015 James Almer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_X86_INTMATH_H
#define AVUTIL_X86_INTMATH_H
#include <stdint.h>
#include <stdlib.h>
#if HAVE_FAST_CLZ
#if defined(_MSC_VER)
#include <intrin.h>
#elif defined(__INTEL_COMPILER)
#include <immintrin.h>
#endif
#endif
#if HAVE_FAST_CLZ
#if(defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1216)) || defined(_MSC_VER)
#if defined(__INTEL_COMPILER)
/* ICC: bit-scan-reverse gives the index of the highest set bit, i.e.
 * floor(log2). The "| 1" forces a nonzero input (BSR is undefined for 0),
 * making ff_log2(0) return 0. */
#define ff_log2(x) (_bit_scan_reverse((x) | 1))
#else
#define ff_log2 ff_log2_x86
/* MSVC: same operation via the _BitScanReverse intrinsic; "| 1" likewise
 * guards the v == 0 case. */
static av_always_inline av_const int ff_log2_x86(unsigned int v) {
unsigned long n;
_BitScanReverse(&n, v | 1);
return n;
}
#endif
/* No faster 16-bit variant; reuse the generic 32-bit log2. */
#define ff_log2_16bit av_log2
#if defined(__INTEL_COMPILER) || (defined(_MSC_VER) && (_MSC_VER >= 1700) && \
(defined(__BMI__) || !defined(__clang__)))
/* Count-trailing-zeros via the TZCNT intrinsic. */
#define ff_ctz(v) _tzcnt_u32(v)
#if ARCH_X86_64
#define ff_ctzll(v) _tzcnt_u64(v)
#else
#define ff_ctzll ff_ctzll_x86
/* 32-bit build: no 64-bit tzcnt, so count per half — if the low 32 bits are
 * all zero, count trailing zeros of the high half and add 32. */
static av_always_inline av_const int ff_ctzll_x86(long long v) {
return ((uint32_t)v == 0) ? _tzcnt_u32((uint32_t)(v >> 32)) + 32 : _tzcnt_u32((uint32_t)v);
}
#endif
#endif /* _MSC_VER */
#endif /* __INTEL_COMPILER */
#endif /* HAVE_FAST_CLZ */
#if defined(__GNUC__)
/* Our generic version of av_popcount is faster than GCC's built-in on
 * CPUs that don't support the popcnt instruction.
 */
#if defined(__POPCNT__)
#define av_popcount __builtin_popcount
#if ARCH_X86_64
#define av_popcount64 __builtin_popcountll
#endif
#endif /* __POPCNT__ */
#if defined(__BMI2__)
#if AV_GCC_VERSION_AT_LEAST(5, 1)
/* av_mod_uintp2(a, p) keeps the low p bits of a; BZHI does that in one
 * instruction. */
#define av_mod_uintp2 __builtin_ia32_bzhi_si
#elif HAVE_INLINE_ASM
/* GCC releases before 5.1.0 have a broken bzhi builtin, so for those we
 * implement it using inline assembly
 */
#define av_mod_uintp2 av_mod_uintp2_bmi2
static av_always_inline av_const unsigned av_mod_uintp2_bmi2(unsigned a, unsigned p) {
/* When p is a compile-time constant the plain mask is at least as cheap
 * and lets the compiler fold it. */
if(av_builtin_constant_p(p))
return a & ((1 << p) - 1);
else {
unsigned x;
__asm__("bzhi %2, %1, %0 \n\t"
: "=r"(x)
: "rm"(a), "r"(p));
return x;
}
}
#endif /* AV_GCC_VERSION_AT_LEAST */
#endif /* __BMI2__ */
/* Replace any previously defined generic clip helpers with branch-free
 * SSE implementations below. */
#ifdef av_clipd
#undef av_clipd
#endif
#ifdef av_clipf
#undef av_clipf
#endif
#if defined(__SSE2__) && !defined(__INTEL_COMPILER)
#define av_clipd av_clipd_sse2
/* Clamp double a into [amin, amax] without branches: minsd then maxsd,
 * i.e. a = max(min(a, amax), amin). Requires amin <= amax (checked only
 * at high assert levels). */
static av_always_inline av_const double av_clipd_sse2(double a, double amin, double amax) {
#if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
if(amin > amax) abort();
#endif
__asm__("minsd %2, %0 \n\t"
"maxsd %1, %0 \n\t"
: "+&x"(a)
: "xm"(amin), "xm"(amax));
return a;
}
#endif /* __SSE2__ */
#if defined(__SSE__) && !defined(__INTEL_COMPILER)
#define av_clipf av_clipf_sse
/* Float counterpart of av_clipd_sse2: branch-free clamp of a into
 * [amin, amax] via minss/maxss. Requires amin <= amax. */
static av_always_inline av_const float av_clipf_sse(float a, float amin, float amax) {
#if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
if(amin > amax) abort();
#endif
__asm__("minss %2, %0 \n\t"
"maxss %1, %0 \n\t"
: "+&x"(a)
: "xm"(amin), "xm"(amax));
return a;
}
#endif /* __SSE__ */
#endif /* __GNUC__ */
#endif /* AVUTIL_X86_INTMATH_H */
+121
View File
@@ -0,0 +1,121 @@
/*
* simple math operations
* Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_X86_MATHOPS_H
#define AVCODEC_X86_MATHOPS_H
#include <libavutil/common.h>
#include "asm.h"
#if HAVE_INLINE_ASM
#if ARCH_X86_32
#define MULL MULL
/**
 * Return the low 32 bits of ((int64_t)a * b) >> shift.
 * imull widens eax * b into the edx:eax pair; shrdl then shifts that
 * 64-bit value right by 'shift' into eax. The "ci" constraint places
 * 'shift' in %cl or an immediate, as shrdl requires.
 */
static av_always_inline av_const int MULL(int a, int b, unsigned shift) {
int rt, dummy;
__asm__(
"imull %3 \n\t"
"shrdl %4, %%edx, %%eax \n\t"
: "=a"(rt), "=d"(dummy)
: "a"(a), "rm"(b), "ci"((uint8_t)shift));
return rt;
}
#define MULH MULH
/**
 * Return the high 32 bits of the signed 64-bit product a * b.
 * imull leaves the product in edx:eax; the "=d" output takes edx.
 */
static av_always_inline av_const int MULH(int a, int b) {
int rt, dummy;
__asm__(
"imull %3"
: "=d"(rt), "=a"(dummy)
: "a"(a), "rm"(b));
return rt;
}
#define MUL64 MUL64
/**
 * Return the full signed 64-bit product of two 32-bit ints.
 * The "=A" constraint binds the edx:eax pair that imull writes.
 */
static av_always_inline av_const int64_t MUL64(int a, int b) {
int64_t rt;
__asm__(
"imull %2"
: "=A"(rt)
: "a"(a), "rm"(b));
return rt;
}
#endif /* ARCH_X86_32 */
#if HAVE_I686
/* median of 3 */
#define mid_pred mid_pred
/**
 * Branch-free median of a, b, c using cmov:
 * the first three instructions sort the (a, i=b) pair so that
 * i = max(a, b) and a = min(a, b); the next pair raises a to
 * max(min(a, b), c); the last pair returns min of that and i,
 * i.e. the median.
 */
static inline av_const int mid_pred(int a, int b, int c) {
int i = b;
__asm__(
"cmp %2, %1 \n\t"
"cmovg %1, %0 \n\t"
"cmovg %2, %1 \n\t"
"cmp %3, %1 \n\t"
"cmovl %3, %1 \n\t"
"cmp %1, %0 \n\t"
"cmovg %1, %0 \n\t"
: "+&r"(i), "+&r"(a)
: "r"(b), "r"(c));
return i;
}
#if HAVE_6REGS
/* If y < x, set x = y, a = b and c = d; otherwise leave all unchanged.
 * A single cmp sets the flags and all three cmovl moves test them, so the
 * three copies happen together or not at all. Needs 6 free registers. */
#define COPY3_IF_LT(x, y, a, b, c, d) \
__asm__ volatile( \
"cmpl %0, %3 \n\t" \
"cmovl %3, %0 \n\t" \
"cmovl %4, %1 \n\t" \
"cmovl %5, %2 \n\t" \
: "+&r"(x), "+&r"(a), "+r"(c) \
: "r"(y), "r"(b), "r"(d));
#endif /* HAVE_6REGS */
#endif /* HAVE_I686 */
/* Set level = |level| and mask = level >> 31 (all-ones if level was
 * negative, zero otherwise). cdq sign-extends eax into edx, then the
 * classic xor+sub sequence computes the absolute value. 'level' is pinned
 * to eax ("+a"); 'mask' comes back in edx. */
#define MASK_ABS(mask, level) \
__asm__("cdq \n\t" \
"xorl %1, %0 \n\t" \
"subl %1, %0 \n\t" \
: "+a"(level), "=&d"(mask))
// avoid +32 for shift optimization (gcc should do that ...)
#define NEG_SSR32 NEG_SSR32
/* Arithmetic shift: a >> (32 - s). The x86 sarl instruction masks its
 * count to 5 bits, so passing -s is equivalent to 32 - s
 * (assumes 1 <= s <= 32 — TODO confirm against callers). The "ic"
 * constraint allows an immediate or the %cl register for the count. */
static inline int32_t NEG_SSR32(int32_t a, int8_t s) {
__asm__("sarl %1, %0\n\t"
: "+r"(a)
: "ic"((uint8_t)(-s)));
return a;
}
#define NEG_USR32 NEG_USR32
/* Logical (unsigned) counterpart of NEG_SSR32: a >> (32 - s), again
 * relying on shrl masking the shift count to 5 bits. */
static inline uint32_t NEG_USR32(uint32_t a, int8_t s) {
__asm__("shrl %1, %0\n\t"
: "+r"(a)
: "ic"((uint8_t)(-s)));
return a;
}
#endif /* HAVE_INLINE_ASM */
#endif /* AVCODEC_X86_MATHOPS_H */