Skip to content
Snippets Groups Projects
Commit 3ba228ad authored by Janne Mareike Koschinski's avatar Janne Mareike Koschinski
Browse files

make the code less bad

parent 5dea25aa
Branches
No related tags found
No related merge requests found
use ffmpeg_dev::sys as ffi;
use failure::bail;
use enum_primitive::*;
use std::marker::PhantomData;
use fraction::Fraction;
use crate::ffmpeg_api::enums::*;
// TODO: Use proper errors (with struct etc) for this
enum_from_primitive! {
// Error categories derived from FFmpeg AVERROR_* codes, so that raw i32
// return values can be converted back with `AVErrorKind::from_i32`.
// Line comments are stripped at lexing, so they are safe inside the macro.
#[derive(Debug, Copy, Clone, PartialEq)]
#[repr(i32)]
pub enum AVErrorKind {
// NOTE(review): `Unknown` is bound to AVERROR_EXPERIMENTAL specifically,
// not a generic catch-all code — confirm this mapping is intended.
Unknown = ffi::AVERROR_EXPERIMENTAL,
InputChanged = ffi::AVERROR_INPUT_CHANGED,
OutputChanged = ffi::AVERROR_OUTPUT_CHANGED
}
}
/// Owning wrapper around FFmpeg's `AVFormatContext`.
///
/// The context is allocated in `new()` and released via
/// `avformat_free_context` in this type's `Drop` impl.
pub struct AVFormatContext {
// Owned raw pointer; never null after a successful `new()`.
base: *mut ffi::AVFormatContext,
}
// Fixed: the impl previously declared an unused lifetime parameter (`impl<'a>`).
impl AVFormatContext {
    /// Allocates a new, empty format context.
    ///
    /// # Errors
    /// Fails if `avformat_alloc_context()` returns null (allocation failure).
    pub fn new() -> Result<Self, failure::Error> {
        let base = unsafe { ffi::avformat_alloc_context() };
        if base.is_null() {
            bail!("avformat_alloc_context() failed");
        }
        Ok(AVFormatContext { base })
    }

    // TODO: Just for testing
    pub unsafe fn raw(&self) -> *mut ffi::AVFormatContext {
        self.base
    }

    /// Opens the media container at `path` for reading.
    ///
    /// # Errors
    /// Fails if `path` contains an interior NUL byte or if FFmpeg cannot
    /// open the input.
    pub fn open_input(&mut self, path: &str) -> Result<(), failure::Error> {
        // Bind the CString to a local so the pointer handed to FFmpeg is
        // backed by a named value for the whole call, not a temporary.
        let c_path = std::ffi::CString::new(path)
            .map_err(|_| failure::format_err!("Could not convert path to c string"))?;
        match unsafe {
            ffi::avformat_open_input(
                &mut self.base,
                c_path.as_ptr(),
                std::ptr::null_mut(),
                std::ptr::null_mut(),
            )
        } {
            0 => Ok(()),
            // NOTE(review): on failure avformat_open_input frees the context
            // and nulls `self.base`; `Drop` tolerates a null pointer.
            _ => bail!("Could not open input"),
        }
    }

    /// Returns borrowed wrappers for every stream in the container.
    pub fn streams(&self) -> Vec<AVStream> {
        unsafe {
            std::slice::from_raw_parts(
                (*self.base).streams,
                (*self.base).nb_streams as usize,
            )
        }
        .iter()
        .map(|stream| AVStream::new(unsafe { (*stream).as_mut() }.expect("not null"), self))
        .collect()
    }
}
impl Drop for AVFormatContext {
fn drop(&mut self) {
// SAFETY: `base` was allocated by avformat_alloc_context() in `new()`,
// or set to null by a failed avformat_open_input();
// avformat_free_context accepts a NULL pointer.
unsafe { ffi::avformat_free_context(self.base) }
}
}
/// Owned byte buffer allocated with FFmpeg's allocator (`av_malloc`),
/// used as frame pixel storage.
pub struct AVBuffer {
    // Null iff constructed via `empty()`.
    base: *mut u8,
    size: usize,
}

impl AVBuffer {
    /// Allocates `size` bytes with `av_malloc`.
    ///
    /// # Errors
    /// Fails if `av_malloc` returns null.
    pub fn new(size: usize) -> Result<Self, failure::Error> {
        let base = unsafe { ffi::av_malloc(size) } as *mut u8;
        if base.is_null() {
            bail!("av_malloc() failed");
        }
        Ok(AVBuffer { base, size })
    }

    /// A zero-length buffer that owns no allocation.
    pub fn empty() -> Self {
        AVBuffer { base: std::ptr::null_mut(), size: 0 }
    }

    /// Immutable view of the buffer contents.
    pub fn data(&self) -> &[u8] {
        if self.base.is_null() {
            // Fixed: `from_raw_parts` requires a non-null pointer even for
            // length 0; the old code was UB for `empty()` buffers.
            return &[];
        }
        unsafe { std::slice::from_raw_parts(self.base, self.size) }
    }

    /// Mutable view of the buffer contents.
    ///
    /// Fixed: previously declared `-> &[u8]` even though it built a mutable
    /// slice, which made the `_mut` accessor useless to callers.
    pub fn data_mut(&mut self) -> &mut [u8] {
        if self.base.is_null() {
            return &mut [];
        }
        unsafe { std::slice::from_raw_parts_mut(self.base, self.size) }
    }
}

impl Drop for AVBuffer {
    fn drop(&mut self) {
        // SAFETY: `base` is either null (av_free accepts NULL) or was
        // allocated by av_malloc in `new()`. Without this Drop the
        // allocation leaked; av_frame_free does not free externally
        // attached picture data.
        unsafe { ffi::av_free(self.base as *mut std::ffi::c_void) }
    }
}
/// Owning wrapper around FFmpeg's `AVFrame`, plus the pixel buffer
/// attached to it by `init()`.
pub struct AVFrame {
// Owned raw pointer; allocated by av_frame_alloc in `new()`.
base: *mut ffi::AVFrame,
// Backing pixel storage; `AVBuffer::empty()` until `init()` is called.
buffer: AVBuffer,
}
impl AVFrame {
pub fn new() -> Result<Self, failure::Error> {
let base = unsafe { ffi::av_frame_alloc() };
if base.is_null() {
bail!("avformat_alloc_frame() failed");
}
Ok(AVFrame { base, buffer: AVBuffer::empty() })
}
// TODO: Just for testing
pub unsafe fn as_mut(&mut self) -> &mut ffi::AVFrame {
self.base.as_mut().expect("not null")
}
pub fn init(&mut self, width: i32, height: i32, format: AVPixelFormat) -> Result<(), failure::Error>{
let mut base = unsafe { self.base.as_mut() }.expect("not null");
base.width = width;
base.height = height;
base.format = format as ffi::AVPixelFormat;
self.buffer = AVBuffer::new(self.size())?;
unsafe {
ffi::avpicture_fill(
self.base as *mut ffi::AVPicture,
self.buffer.base as *mut u8,
self.format() as ffi::AVPixelFormat,
self.width(),
self.height(),
)
};
Ok(())
}
pub fn width(&self) -> i32 {
let base = unsafe { self.base.as_ref() }.expect("not null");
base.width
}
pub fn height(&self) -> i32 {
let base = unsafe { self.base.as_ref() }.expect("not null");
base.height
}
pub fn format(&self) -> AVPixelFormat {
let base = unsafe { self.base.as_ref() }.expect("not null");
AVPixelFormat::from_i32(base.format)
.unwrap_or(AVPixelFormat::NONE)
}
pub fn size(&self) -> usize {
unsafe {
ffi::avpicture_get_size(self.format() as ffi::AVPixelFormat, self.width(), self.height()) as usize
}
}
pub fn key_frame(&self) -> bool {
let base = unsafe { self.base.as_ref() }.expect("not null");
base.key_frame != 0
}
pub fn pts(&self) -> i64 {
let base = unsafe { self.base.as_ref() }.expect("not null");
base.pts
}
pub fn coded_picture_number(&self) -> i32 {
let base = unsafe { self.base.as_ref() }.expect("not null");
base.coded_picture_number
}
pub fn display_picture_number(&self) -> i32 {
let base = unsafe { self.base.as_ref() }.expect("not null");
base.display_picture_number
}
pub fn linesize(&self) -> &[i32] {
let base = unsafe { self.base.as_ref() }.expect("not null");
&base.linesize
}
pub fn data_ptr(&self) -> *const *const u8 {
let base = unsafe { self.base.as_ref() }.expect("not null");
base.data.as_ptr() as *const *const u8
}
pub fn data_mut_ptr(&mut self) -> *mut *mut u8 {
let base = unsafe { self.base.as_mut() }.expect("not null");
base.data.as_mut_ptr() as *mut *mut u8
}
pub fn data(&self, index: usize) -> &[u8] {
let base = unsafe { self.base.as_ref() }.expect("not null");
unsafe {
std::slice::from_raw_parts(base.data[index], self.size())
}
}
pub fn data_mut(&mut self, index: usize) -> &mut [u8] {
let base = unsafe { self.base.as_mut() }.expect("not null");
unsafe {
std::slice::from_raw_parts_mut(base.data[index], self.size())
}
}
}
impl Drop for AVFrame {
fn drop(&mut self) {
// SAFETY: `base` was allocated by av_frame_alloc() in `new()`.
// av_frame_free nulls the pointer and tolerates NULL input.
unsafe { ffi::av_frame_free(&mut self.base) }
}
}
/// Borrowed view of a single stream inside an `AVFormatContext`.
pub struct AVStream<'a> {
// Points into the format context's stream array.
base: &'a mut ffi::AVStream,
// Ties this wrapper's lifetime to the owning AVFormatContext.
phantom: PhantomData<&'a AVFormatContext>,
}
impl<'a> AVStream<'a> {
    /// Wraps a raw stream reference; the context argument only anchors the
    /// borrow's lifetime.
    fn new(base: &'a mut ffi::AVStream, _: &'a AVFormatContext) -> Self {
        AVStream { base, phantom: PhantomData }
    }

    /// Index of this stream within its format context.
    pub fn index(&self) -> i32 {
        self.base.index
    }

    /// Stream time base as a fraction (seconds per timestamp tick).
    pub fn time_base(&self) -> Fraction {
        Fraction::new(
            self.base.time_base.num as u32,
            self.base.time_base.den as u32,
        )
    }

    /// Converts a raw timestamp (in time-base units) to a wall-clock duration.
    pub fn timestamp(&self, timestamp: i64) -> std::time::Duration {
        std::time::Duration::from_millis(
            1000 *
                timestamp as u64 *
                self.base.time_base.num as u64 /
                self.base.time_base.den as u64
        )
    }

    /// Total stream duration.
    pub fn duration(&self) -> std::time::Duration {
        self.timestamp(self.base.duration)
    }

    /// Number of frames, if known to the demuxer.
    pub fn frame_count(&self) -> i64 {
        self.base.nb_frames
    }

    /// Current discard policy, if the raw value maps to a known variant.
    pub fn discard(&self) -> Option<AVDiscard> {
        AVDiscard::from_i32(self.base.discard)
    }

    /// Sets which packets the demuxer may discard for this stream.
    pub fn set_discard(&mut self, value: AVDiscard) {
        self.base.discard = value as ffi::AVDiscard;
    }

    /// Sample aspect ratio as a fraction.
    pub fn sample_aspect_ratio(&self) -> Fraction {
        Fraction::new(
            self.base.sample_aspect_ratio.num as u32,
            self.base.sample_aspect_ratio.den as u32,
        )
    }

    /// Codec parameters describing this stream's encoded data.
    pub fn codec_parameters(&self) -> AVCodecParameters {
        AVCodecParameters::new(unsafe { self.base.codecpar.as_mut() }.expect("not null"), self)
    }
}
/// Borrowed view of a stream's codec parameters.
pub struct AVCodecParameters<'a> {
    base: &'a mut ffi::AVCodecParameters,
    // Anchors the borrow to the originating AVStream.
    phantom: PhantomData<&'a AVStream<'a>>,
}

impl<'a> AVCodecParameters<'a> {
    /// Wraps raw parameters; the stream argument only anchors the lifetime.
    fn new(base: &'a mut ffi::AVCodecParameters, _: &'a AVStream) -> Self {
        AVCodecParameters { base, phantom: PhantomData }
    }

    // TODO: Just for testing
    pub unsafe fn as_ref(&self) -> &ffi::AVCodecParameters {
        self.base
    }

    /// Media type (video/audio/...), `Unknown` for unmapped raw values.
    pub fn codec_type(&self) -> AVMediaType {
        AVMediaType::from_i32(self.base.codec_type).unwrap_or(AVMediaType::Unknown)
    }

    /// Codec identifier, if the raw value maps to a known `AVCodecID`.
    pub fn codec_id(&self) -> Option<AVCodecID> {
        AVCodecID::from_u32(self.base.codec_id)
    }

    /// Looks up a decoder for this codec.
    ///
    /// Panics if FFmpeg has no decoder for the codec id.
    pub fn find_decoder(&self) -> AVCodec {
        AVCodec::new(
            unsafe { ffi::avcodec_find_decoder(self.base.codec_id).as_mut() }
                .expect("Decoder not found"),
            self,
        )
    }
}
/// Borrowed handle to an FFmpeg codec descriptor.
pub struct AVCodec<'a> {
    base: &'a mut ffi::AVCodec,
    // Anchors the borrow to the codec parameters it was resolved from.
    phantom: PhantomData<&'a AVCodecParameters<'a>>,
}

impl<'a> AVCodec<'a> {
    /// Wraps a raw codec; the parameters argument only anchors the lifetime.
    fn new(base: &'a mut ffi::AVCodec, _: &'a AVCodecParameters) -> Self {
        AVCodec { base, phantom: PhantomData }
    }

    // TODO: Just for testing
    pub unsafe fn as_ref(&self) -> &ffi::AVCodec {
        self.base
    }

    /// Human-readable codec name.
    ///
    /// Robustness fix: uses a lossy conversion instead of panicking via
    /// `to_str().unwrap()` if the name were ever not valid UTF-8 (codec
    /// names are ASCII in practice, so output is unchanged for real data).
    pub fn name(&self) -> String {
        unsafe { std::ffi::CStr::from_ptr(self.base.name) }
            .to_string_lossy()
            .into_owned()
    }
}
use ffmpeg_dev::sys as ffi;
use failure::bail;
use enum_primitive::*;
use std::marker::PhantomData;
use fraction::{Decimal, Fraction};

enum_from_primitive! {
#[doc = " Pixel format."]
#[doc = ""]
#[doc = " @note"]
#[doc = " AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA"]
#[doc = " color is put together as:"]
#[doc = " (A << 24) | (R << 16) | (G << 8) | B"]
#[doc = " This is stored as BGRA on little-endian CPU architectures and ARGB on"]
#[doc = " big-endian CPUs."]
#[doc = ""]
#[doc = " @note"]
#[doc = " If the resolution is not a multiple of the chroma subsampling factor"]
#[doc = " then the chroma plane resolution must be rounded up."]
#[doc = ""]
#[doc = " @par"]
#[doc = " When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the palettized"]
#[doc = " image data is stored in AVFrame.data[0]. The palette is transported in"]
#[doc = " AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is"]
#[doc = " formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is"]
#[doc = " also endian-specific). Note also that the individual RGB32 palette"]
#[doc = " components stored in AVFrame.data[1] should be in the range 0..255."]
#[doc = " This is important as many custom PAL8 video codecs that were designed"]
#[doc = " to run on the IBM VGA graphics adapter use 6-bit palette components."]
#[doc = ""]
#[doc = " @par"]
#[doc = " For all the 8 bits per pixel formats, an RGB32 palette is in data[1] like"]
#[doc = " for pal8. This palette is filled in automatically by the function"]
#[doc = " allocating the picture."]
#[allow(non_camel_case_types)]
#[derive(Debug, Copy, Clone, PartialEq)]
#[repr(i32)]
pub enum AVPixelFormat {
NONE = ffi::AVPixelFormat_AV_PIX_FMT_NONE,
#[doc = "< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)"]
YUV420P = ffi::AVPixelFormat_AV_PIX_FMT_YUV420P,
#[doc = "< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr"]
YUYV422 = ffi::AVPixelFormat_AV_PIX_FMT_YUYV422,
#[doc = "< packed RGB 8:8:8, 24bpp, RGBRGB..."]
RGB24 = ffi::AVPixelFormat_AV_PIX_FMT_RGB24,
#[doc = "< packed RGB 8:8:8, 24bpp, BGRBGR..."]
BGR24 = ffi::AVPixelFormat_AV_PIX_FMT_BGR24,
#[doc = "< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)"]
YUV422P = ffi::AVPixelFormat_AV_PIX_FMT_YUV422P,
#[doc = "< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)"]
YUV444P = ffi::AVPixelFormat_AV_PIX_FMT_YUV444P,
#[doc = "< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)"]
YUV410P = ffi::AVPixelFormat_AV_PIX_FMT_YUV410P,
#[doc = "< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)"]
YUV411P = ffi::AVPixelFormat_AV_PIX_FMT_YUV411P,
#[doc = "< Y , 8bpp"]
GRAY8 = ffi::AVPixelFormat_AV_PIX_FMT_GRAY8,
#[doc = "< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb"]
MONOWHITE = ffi::AVPixelFormat_AV_PIX_FMT_MONOWHITE,
#[doc = "< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb"]
MONOBLACK = ffi::AVPixelFormat_AV_PIX_FMT_MONOBLACK,
#[doc = "< 8 bits with AV_PIX_FMT_RGB32 palette"]
PAL8 = ffi::AVPixelFormat_AV_PIX_FMT_PAL8,
#[doc = "< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range"]
YUVJ420P = ffi::AVPixelFormat_AV_PIX_FMT_YUVJ420P,
#[doc = "< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range"]
YUVJ422P = ffi::AVPixelFormat_AV_PIX_FMT_YUVJ422P,
#[doc = "< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range"]
YUVJ444P = ffi::AVPixelFormat_AV_PIX_FMT_YUVJ444P,
#[doc = "< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1"]
UYVY422 = ffi::AVPixelFormat_AV_PIX_FMT_UYVY422,
#[doc = "< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3"]
UYYVYY411 = ffi::AVPixelFormat_AV_PIX_FMT_UYYVYY411,
#[doc = "< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)"]
BGR8 = ffi::AVPixelFormat_AV_PIX_FMT_BGR8,
#[doc = "< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits"]
BGR4 = ffi::AVPixelFormat_AV_PIX_FMT_BGR4,
#[doc = "< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)"]
BGR4_BYTE = ffi::AVPixelFormat_AV_PIX_FMT_BGR4_BYTE,
#[doc = "< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)"]
RGB8 = ffi::AVPixelFormat_AV_PIX_FMT_RGB8,
#[doc = "< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits"]
RGB4 = ffi::AVPixelFormat_AV_PIX_FMT_RGB4,
#[doc = "< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)"]
RGB4_BYTE = ffi::AVPixelFormat_AV_PIX_FMT_RGB4_BYTE,
#[doc = "< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)"]
NV12 = ffi::AVPixelFormat_AV_PIX_FMT_NV12,
#[doc = "< as above, but U and V bytes are swapped"]
NV21 = ffi::AVPixelFormat_AV_PIX_FMT_NV21,
#[doc = "< packed ARGB 8:8:8:8, 32bpp, ARGBARGB..."]
ARGB = ffi::AVPixelFormat_AV_PIX_FMT_ARGB,
#[doc = "< packed RGBA 8:8:8:8, 32bpp, RGBARGBA..."]
RGBA = ffi::AVPixelFormat_AV_PIX_FMT_RGBA,
#[doc = "< packed ABGR 8:8:8:8, 32bpp, ABGRABGR..."]
ABGR = ffi::AVPixelFormat_AV_PIX_FMT_ABGR,
#[doc = "< packed BGRA 8:8:8:8, 32bpp, BGRABGRA..."]
BGRA = ffi::AVPixelFormat_AV_PIX_FMT_BGRA,
#[doc = "< Y , 16bpp, big-endian"]
GRAY16BE = ffi::AVPixelFormat_AV_PIX_FMT_GRAY16BE,
#[doc = "< Y , 16bpp, little-endian"]
GRAY16LE = ffi::AVPixelFormat_AV_PIX_FMT_GRAY16LE,
#[doc = "< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)"]
YUV440P = ffi::AVPixelFormat_AV_PIX_FMT_YUV440P,
#[doc = "< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range"]
YUVJ440P = ffi::AVPixelFormat_AV_PIX_FMT_YUVJ440P,
#[doc = "< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)"]
YUVA420P = ffi::AVPixelFormat_AV_PIX_FMT_YUVA420P,
#[doc = "< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian"]
RGB48BE = ffi::AVPixelFormat_AV_PIX_FMT_RGB48BE,
#[doc = "< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian"]
RGB48LE = ffi::AVPixelFormat_AV_PIX_FMT_RGB48LE,
#[doc = "< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian"]
RGB565BE = ffi::AVPixelFormat_AV_PIX_FMT_RGB565BE,
#[doc = "< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian"]
RGB565LE = ffi::AVPixelFormat_AV_PIX_FMT_RGB565LE,
#[doc = "< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined"]
RGB555BE = ffi::AVPixelFormat_AV_PIX_FMT_RGB555BE,
#[doc = "< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined"]
RGB555LE = ffi::AVPixelFormat_AV_PIX_FMT_RGB555LE,
#[doc = "< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian"]
BGR565BE = ffi::AVPixelFormat_AV_PIX_FMT_BGR565BE,
#[doc = "< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian"]
BGR565LE = ffi::AVPixelFormat_AV_PIX_FMT_BGR565LE,
#[doc = "< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined"]
BGR555BE = ffi::AVPixelFormat_AV_PIX_FMT_BGR555BE,
#[doc = "< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined"]
BGR555LE = ffi::AVPixelFormat_AV_PIX_FMT_BGR555LE,
#[doc = "< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers"]
VAAPI_MOCO = ffi::AVPixelFormat_AV_PIX_FMT_VAAPI_MOCO,
#[doc = "< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers"]
VAAPI_IDCT = ffi::AVPixelFormat_AV_PIX_FMT_VAAPI_IDCT,
#[doc = "< HW decoding through VA API, Picture.data[3] contains a VASurfaceID"]
VAAPI_VLD = ffi::AVPixelFormat_AV_PIX_FMT_VAAPI_VLD,
#[doc = "< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian"]
YUV420P16LE = ffi::AVPixelFormat_AV_PIX_FMT_YUV420P16LE,
#[doc = "< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian"]
YUV420P16BE = ffi::AVPixelFormat_AV_PIX_FMT_YUV420P16BE,
#[doc = "< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian"]
YUV422P16LE = ffi::AVPixelFormat_AV_PIX_FMT_YUV422P16LE,
#[doc = "< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian"]
YUV422P16BE = ffi::AVPixelFormat_AV_PIX_FMT_YUV422P16BE,
#[doc = "< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian"]
YUV444P16LE = ffi::AVPixelFormat_AV_PIX_FMT_YUV444P16LE,
#[doc = "< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian"]
YUV444P16BE = ffi::AVPixelFormat_AV_PIX_FMT_YUV444P16BE,
#[doc = "< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer"]
DXVA2_VLD = ffi::AVPixelFormat_AV_PIX_FMT_DXVA2_VLD,
#[doc = "< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined"]
RGB444LE = ffi::AVPixelFormat_AV_PIX_FMT_RGB444LE,
#[doc = "< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined"]
RGB444BE = ffi::AVPixelFormat_AV_PIX_FMT_RGB444BE,
#[doc = "< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined"]
BGR444LE = ffi::AVPixelFormat_AV_PIX_FMT_BGR444LE,
#[doc = "< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined"]
BGR444BE = ffi::AVPixelFormat_AV_PIX_FMT_BGR444BE,
#[doc = "< 8 bits gray, 8 bits alpha"]
YA8 = ffi::AVPixelFormat_AV_PIX_FMT_YA8,
#[doc = "< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian"]
BGR48BE = ffi::AVPixelFormat_AV_PIX_FMT_BGR48BE,
#[doc = "< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian"]
BGR48LE = ffi::AVPixelFormat_AV_PIX_FMT_BGR48LE,
#[doc = "< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian"]
YUV420P9BE = ffi::AVPixelFormat_AV_PIX_FMT_YUV420P9BE,
#[doc = "< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian"]
YUV420P9LE = ffi::AVPixelFormat_AV_PIX_FMT_YUV420P9LE,
#[doc = "< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian"]
YUV420P10BE = ffi::AVPixelFormat_AV_PIX_FMT_YUV420P10BE,
#[doc = "< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian"]
YUV420P10LE = ffi::AVPixelFormat_AV_PIX_FMT_YUV420P10LE,
#[doc = "< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian"]
YUV422P10BE = ffi::AVPixelFormat_AV_PIX_FMT_YUV422P10BE,
#[doc = "< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian"]
YUV422P10LE = ffi::AVPixelFormat_AV_PIX_FMT_YUV422P10LE,
#[doc = "< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian"]
YUV444P9BE = ffi::AVPixelFormat_AV_PIX_FMT_YUV444P9BE,
#[doc = "< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian"]
YUV444P9LE = ffi::AVPixelFormat_AV_PIX_FMT_YUV444P9LE,
#[doc = "< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian"]
YUV444P10BE = ffi::AVPixelFormat_AV_PIX_FMT_YUV444P10BE,
#[doc = "< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian"]
YUV444P10LE = ffi::AVPixelFormat_AV_PIX_FMT_YUV444P10LE,
#[doc = "< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian"]
YUV422P9BE = ffi::AVPixelFormat_AV_PIX_FMT_YUV422P9BE,
#[doc = "< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian"]
YUV422P9LE = ffi::AVPixelFormat_AV_PIX_FMT_YUV422P9LE,
#[doc = "< planar GBR 4:4:4 24bpp"]
GBRP = ffi::AVPixelFormat_AV_PIX_FMT_GBRP,
#[doc = "< planar GBR 4:4:4 27bpp, big-endian"]
GBRP9BE = ffi::AVPixelFormat_AV_PIX_FMT_GBRP9BE,
#[doc = "< planar GBR 4:4:4 27bpp, little-endian"]
GBRP9LE = ffi::AVPixelFormat_AV_PIX_FMT_GBRP9LE,
#[doc = "< planar GBR 4:4:4 30bpp, big-endian"]
GBRP10BE = ffi::AVPixelFormat_AV_PIX_FMT_GBRP10BE,
#[doc = "< planar GBR 4:4:4 30bpp, little-endian"]
GBRP10LE = ffi::AVPixelFormat_AV_PIX_FMT_GBRP10LE,
#[doc = "< planar GBR 4:4:4 48bpp, big-endian"]
GBRP16BE = ffi::AVPixelFormat_AV_PIX_FMT_GBRP16BE,
#[doc = "< planar GBR 4:4:4 48bpp, little-endian"]
GBRP16LE = ffi::AVPixelFormat_AV_PIX_FMT_GBRP16LE,
#[doc = "< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)"]
YUVA422P = ffi::AVPixelFormat_AV_PIX_FMT_YUVA422P,
#[doc = "< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)"]
YUVA444P = ffi::AVPixelFormat_AV_PIX_FMT_YUVA444P,
#[doc = "< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian"]
YUVA420P9BE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA420P9BE,
#[doc = "< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian"]
YUVA420P9LE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA420P9LE,
#[doc = "< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian"]
YUVA422P9BE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA422P9BE,
#[doc = "< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian"]
YUVA422P9LE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA422P9LE,
#[doc = "< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian"]
YUVA444P9BE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA444P9BE,
#[doc = "< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian"]
YUVA444P9LE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA444P9LE,
#[doc = "< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)"]
YUVA420P10BE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA420P10BE,
#[doc = "< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)"]
YUVA420P10LE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA420P10LE,
#[doc = "< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)"]
YUVA422P10BE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA422P10BE,
#[doc = "< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)"]
YUVA422P10LE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA422P10LE,
#[doc = "< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)"]
YUVA444P10BE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA444P10BE,
#[doc = "< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)"]
YUVA444P10LE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA444P10LE,
#[doc = "< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)"]
YUVA420P16BE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA420P16BE,
#[doc = "< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)"]
YUVA420P16LE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA420P16LE,
#[doc = "< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)"]
YUVA422P16BE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA422P16BE,
#[doc = "< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)"]
YUVA422P16LE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA422P16LE,
#[doc = "< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)"]
YUVA444P16BE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA444P16BE,
#[doc = "< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)"]
YUVA444P16LE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA444P16LE,
#[doc = "< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface"]
VDPAU = ffi::AVPixelFormat_AV_PIX_FMT_VDPAU,
#[doc = "< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0"]
XYZ12LE = ffi::AVPixelFormat_AV_PIX_FMT_XYZ12LE,
#[doc = "< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0"]
XYZ12BE = ffi::AVPixelFormat_AV_PIX_FMT_XYZ12BE,
#[doc = "< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)"]
NV16 = ffi::AVPixelFormat_AV_PIX_FMT_NV16,
#[doc = "< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian"]
NV20LE = ffi::AVPixelFormat_AV_PIX_FMT_NV20LE,
#[doc = "< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian"]
NV20BE = ffi::AVPixelFormat_AV_PIX_FMT_NV20BE,
#[doc = "< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian"]
RGBA64BE = ffi::AVPixelFormat_AV_PIX_FMT_RGBA64BE,
#[doc = "< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian"]
RGBA64LE = ffi::AVPixelFormat_AV_PIX_FMT_RGBA64LE,
#[doc = "< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian"]
BGRA64BE = ffi::AVPixelFormat_AV_PIX_FMT_BGRA64BE,
#[doc = "< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian"]
BGRA64LE = ffi::AVPixelFormat_AV_PIX_FMT_BGRA64LE,
#[doc = "< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb"]
YVYU422 = ffi::AVPixelFormat_AV_PIX_FMT_YVYU422,
#[doc = "< 16 bits gray, 16 bits alpha (big-endian)"]
YA16BE = ffi::AVPixelFormat_AV_PIX_FMT_YA16BE,
#[doc = "< 16 bits gray, 16 bits alpha (little-endian)"]
YA16LE = ffi::AVPixelFormat_AV_PIX_FMT_YA16LE,
#[doc = "< planar GBRA 4:4:4:4 32bpp"]
GBRAP = ffi::AVPixelFormat_AV_PIX_FMT_GBRAP,
#[doc = "< planar GBRA 4:4:4:4 64bpp, big-endian"]
GBRAP16BE = ffi::AVPixelFormat_AV_PIX_FMT_GBRAP16BE,
#[doc = "< planar GBRA 4:4:4:4 64bpp, little-endian"]
GBRAP16LE = ffi::AVPixelFormat_AV_PIX_FMT_GBRAP16LE,
#[doc = " HW acceleration through QSV, data[3] contains a pointer to the"]
#[doc = " mfxFrameSurface1 structure."]
QSV = ffi::AVPixelFormat_AV_PIX_FMT_QSV,
#[doc = " HW acceleration though MMAL, data[3] contains a pointer to the"]
#[doc = " MMAL_BUFFER_HEADER_T structure."]
MMAL = ffi::AVPixelFormat_AV_PIX_FMT_MMAL,
#[doc = "< HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer"]
D3D11VA_VLD = ffi::AVPixelFormat_AV_PIX_FMT_D3D11VA_VLD,
#[doc = " HW acceleration through CUDA. data[i] contain CUdeviceptr pointers"]
#[doc = " exactly as for system memory frames."]
CUDA = ffi::AVPixelFormat_AV_PIX_FMT_CUDA,
#[doc = "< packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined"]
_0RGB = ffi::AVPixelFormat_AV_PIX_FMT_0RGB,
#[doc = "< packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined"]
RGB0 = ffi::AVPixelFormat_AV_PIX_FMT_RGB0,
#[doc = "< packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined"]
_0BGR = ffi::AVPixelFormat_AV_PIX_FMT_0BGR,
#[doc = "< packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined"]
BGR0 = ffi::AVPixelFormat_AV_PIX_FMT_BGR0,
#[doc = "< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian"]
YUV420P12BE = ffi::AVPixelFormat_AV_PIX_FMT_YUV420P12BE,
#[doc = "< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian"]
YUV420P12LE = ffi::AVPixelFormat_AV_PIX_FMT_YUV420P12LE,
#[doc = "< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian"]
YUV420P14BE = ffi::AVPixelFormat_AV_PIX_FMT_YUV420P14BE,
#[doc = "< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian"]
YUV420P14LE = ffi::AVPixelFormat_AV_PIX_FMT_YUV420P14LE,
#[doc = "< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian"]
YUV422P12BE = ffi::AVPixelFormat_AV_PIX_FMT_YUV422P12BE,
#[doc = "< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian"]
YUV422P12LE = ffi::AVPixelFormat_AV_PIX_FMT_YUV422P12LE,
#[doc = "< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian"]
YUV422P14BE = ffi::AVPixelFormat_AV_PIX_FMT_YUV422P14BE,
#[doc = "< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian"]
YUV422P14LE = ffi::AVPixelFormat_AV_PIX_FMT_YUV422P14LE,
#[doc = "< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian"]
YUV444P12BE = ffi::AVPixelFormat_AV_PIX_FMT_YUV444P12BE,
#[doc = "< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian"]
YUV444P12LE = ffi::AVPixelFormat_AV_PIX_FMT_YUV444P12LE,
#[doc = "< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian"]
YUV444P14BE = ffi::AVPixelFormat_AV_PIX_FMT_YUV444P14BE,
#[doc = "< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian"]
YUV444P14LE = ffi::AVPixelFormat_AV_PIX_FMT_YUV444P14LE,
#[doc = "< planar GBR 4:4:4 36bpp, big-endian"]
GBRP12BE = ffi::AVPixelFormat_AV_PIX_FMT_GBRP12BE,
#[doc = "< planar GBR 4:4:4 36bpp, little-endian"]
GBRP12LE = ffi::AVPixelFormat_AV_PIX_FMT_GBRP12LE,
#[doc = "< planar GBR 4:4:4 42bpp, big-endian"]
GBRP14BE = ffi::AVPixelFormat_AV_PIX_FMT_GBRP14BE,
#[doc = "< planar GBR 4:4:4 42bpp, little-endian"]
GBRP14LE = ffi::AVPixelFormat_AV_PIX_FMT_GBRP14LE,
#[doc = "< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range"]
YUVJ411P = ffi::AVPixelFormat_AV_PIX_FMT_YUVJ411P,
#[doc = "< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */"]
BAYER_BGGR8 = ffi::AVPixelFormat_AV_PIX_FMT_BAYER_BGGR8,
#[doc = "< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */"]
BAYER_RGGB8 = ffi::AVPixelFormat_AV_PIX_FMT_BAYER_RGGB8,
#[doc = "< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */"]
BAYER_GBRG8 = ffi::AVPixelFormat_AV_PIX_FMT_BAYER_GBRG8,
#[doc = "< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples */"]
BAYER_GRBG8 = ffi::AVPixelFormat_AV_PIX_FMT_BAYER_GRBG8,
#[doc = "< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */"]
BAYER_BGGR16LE = ffi::AVPixelFormat_AV_PIX_FMT_BAYER_BGGR16LE,
#[doc = "< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */"]
BAYER_BGGR16BE = ffi::AVPixelFormat_AV_PIX_FMT_BAYER_BGGR16BE,
#[doc = "< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */"]
BAYER_RGGB16LE = ffi::AVPixelFormat_AV_PIX_FMT_BAYER_RGGB16LE,
#[doc = "< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */"]
BAYER_RGGB16BE = ffi::AVPixelFormat_AV_PIX_FMT_BAYER_RGGB16BE,
#[doc = "< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */"]
BAYER_GBRG16LE = ffi::AVPixelFormat_AV_PIX_FMT_BAYER_GBRG16LE,
#[doc = "< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */"]
BAYER_GBRG16BE = ffi::AVPixelFormat_AV_PIX_FMT_BAYER_GBRG16BE,
#[doc = "< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */"]
BAYER_GRBG16LE = ffi::AVPixelFormat_AV_PIX_FMT_BAYER_GRBG16LE,
#[doc = "< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */"]
BAYER_GRBG16BE = ffi::AVPixelFormat_AV_PIX_FMT_BAYER_GRBG16BE,
#[doc = "< XVideo Motion Acceleration via common packet passing"]
XVMC = ffi::AVPixelFormat_AV_PIX_FMT_XVMC,
#[doc = "< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian"]
YUV440P10LE = ffi::AVPixelFormat_AV_PIX_FMT_YUV440P10LE,
#[doc = "< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian"]
YUV440P10BE = ffi::AVPixelFormat_AV_PIX_FMT_YUV440P10BE,
#[doc = "< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian"]
YUV440P12LE = ffi::AVPixelFormat_AV_PIX_FMT_YUV440P12LE,
#[doc = "< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian"]
YUV440P12BE = ffi::AVPixelFormat_AV_PIX_FMT_YUV440P12BE,
#[doc = "< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian"]
AYUV64LE = ffi::AVPixelFormat_AV_PIX_FMT_AYUV64LE,
#[doc = "< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), big-endian"]
AYUV64BE = ffi::AVPixelFormat_AV_PIX_FMT_AYUV64BE,
#[doc = "< hardware decoding through Videotoolbox"]
VIDEOTOOLBOX = ffi::AVPixelFormat_AV_PIX_FMT_VIDEOTOOLBOX,
#[doc = "< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, little-endian"]
P010LE = ffi::AVPixelFormat_AV_PIX_FMT_P010LE,
#[doc = "< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, big-endian"]
P010BE = ffi::AVPixelFormat_AV_PIX_FMT_P010BE,
#[doc = "< planar GBR 4:4:4:4 48bpp, big-endian"]
GBRAP12BE = ffi::AVPixelFormat_AV_PIX_FMT_GBRAP12BE,
#[doc = "< planar GBR 4:4:4:4 48bpp, little-endian"]
GBRAP12LE = ffi::AVPixelFormat_AV_PIX_FMT_GBRAP12LE,
#[doc = "< planar GBR 4:4:4:4 40bpp, big-endian"]
GBRAP10BE = ffi::AVPixelFormat_AV_PIX_FMT_GBRAP10BE,
#[doc = "< planar GBR 4:4:4:4 40bpp, little-endian"]
GBRAP10LE = ffi::AVPixelFormat_AV_PIX_FMT_GBRAP10LE,
#[doc = "< hardware decoding through MediaCodec"]
MEDIACODEC = ffi::AVPixelFormat_AV_PIX_FMT_MEDIACODEC,
#[doc = "< Y , 12bpp, big-endian"]
GRAY12BE = ffi::AVPixelFormat_AV_PIX_FMT_GRAY12BE,
#[doc = "< Y , 12bpp, little-endian"]
GRAY12LE = ffi::AVPixelFormat_AV_PIX_FMT_GRAY12LE,
#[doc = "< Y , 10bpp, big-endian"]
GRAY10BE = ffi::AVPixelFormat_AV_PIX_FMT_GRAY10BE,
#[doc = "< Y , 10bpp, little-endian"]
GRAY10LE = ffi::AVPixelFormat_AV_PIX_FMT_GRAY10LE,
#[doc = "< like NV12, with 16bpp per component, little-endian"]
P016LE = ffi::AVPixelFormat_AV_PIX_FMT_P016LE,
#[doc = "< like NV12, with 16bpp per component, big-endian"]
P016BE = ffi::AVPixelFormat_AV_PIX_FMT_P016BE,
#[doc = " Hardware surfaces for Direct3D11."]
#[doc = ""]
#[doc = " This is preferred over the legacy AV_PIX_FMT_D3D11VA_VLD. The new D3D11"]
#[doc = " hwaccel API and filtering support AV_PIX_FMT_D3D11 only."]
#[doc = ""]
#[doc = " data[0] contains a ID3D11Texture2D pointer, and data[1] contains the"]
#[doc = " texture array index of the frame as intptr_t if the ID3D11Texture2D is"]
#[doc = " an array texture (or always 0 if it's a normal texture)."]
D3D11 = ffi::AVPixelFormat_AV_PIX_FMT_D3D11,
#[doc = "< Y , 9bpp, big-endian"]
GRAY9BE = ffi::AVPixelFormat_AV_PIX_FMT_GRAY9BE,
#[doc = "< Y , 9bpp, little-endian"]
GRAY9LE = ffi::AVPixelFormat_AV_PIX_FMT_GRAY9LE,
#[doc = "< IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian"]
GBRPF32BE = ffi::AVPixelFormat_AV_PIX_FMT_GBRPF32BE,
#[doc = "< IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian"]
GBRPF32LE = ffi::AVPixelFormat_AV_PIX_FMT_GBRPF32LE,
#[doc = "< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian"]
GBRAPF32BE = ffi::AVPixelFormat_AV_PIX_FMT_GBRAPF32BE,
#[doc = "< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian"]
GBRAPF32LE = ffi::AVPixelFormat_AV_PIX_FMT_GBRAPF32LE,
#[doc = " DRM-managed buffers exposed through PRIME buffer sharing."]
#[doc = ""]
#[doc = " data[0] points to an AVDRMFrameDescriptor."]
DRM_PRIME = ffi::AVPixelFormat_AV_PIX_FMT_DRM_PRIME,
#[doc = " Hardware surfaces for OpenCL."]
#[doc = ""]
#[doc = " data[i] contain 2D image objects (typed in C as cl_mem, used"]
#[doc = " in OpenCL as image2d_t) for each plane of the surface."]
OPENCL = ffi::AVPixelFormat_AV_PIX_FMT_OPENCL,
#[doc = "< Y , 14bpp, big-endian"]
GRAY14BE = ffi::AVPixelFormat_AV_PIX_FMT_GRAY14BE,
#[doc = "< Y , 14bpp, little-endian"]
GRAY14LE = ffi::AVPixelFormat_AV_PIX_FMT_GRAY14LE,
#[doc = "< IEEE-754 single precision Y, 32bpp, big-endian"]
GRAYF32BE = ffi::AVPixelFormat_AV_PIX_FMT_GRAYF32BE,
#[doc = "< IEEE-754 single precision Y, 32bpp, little-endian"]
GRAYF32LE = ffi::AVPixelFormat_AV_PIX_FMT_GRAYF32LE,
#[doc = "< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, big-endian"]
YUVA422P12BE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA422P12BE,
#[doc = "< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, little-endian"]
YUVA422P12LE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA422P12LE,
#[doc = "< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, big-endian"]
YUVA444P12BE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA444P12BE,
#[doc = "< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, little-endian"]
YUVA444P12LE = ffi::AVPixelFormat_AV_PIX_FMT_YUVA444P12LE,
#[doc = "< planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)"]
NV24 = ffi::AVPixelFormat_AV_PIX_FMT_NV24,
#[doc = "< as above, but U and V bytes are swapped"]
NV42 = ffi::AVPixelFormat_AV_PIX_FMT_NV42
}
}
enum_from_primitive! { enum_from_primitive! {
#[derive(Debug, Copy, Clone, PartialEq)] #[derive(Debug, Copy, Clone, PartialEq)]
...@@ -13,19 +447,18 @@ enum_from_primitive! { ...@@ -13,19 +447,18 @@ enum_from_primitive! {
Audio = ffi::AVMediaType_AVMEDIA_TYPE_AUDIO, Audio = ffi::AVMediaType_AVMEDIA_TYPE_AUDIO,
Data = ffi::AVMediaType_AVMEDIA_TYPE_DATA, Data = ffi::AVMediaType_AVMEDIA_TYPE_DATA,
Subtitle = ffi::AVMediaType_AVMEDIA_TYPE_SUBTITLE, Subtitle = ffi::AVMediaType_AVMEDIA_TYPE_SUBTITLE,
Attachment = ffi::AVMediaType_AVMEDIA_TYPE_ATTACHMENT, Attachment = ffi::AVMediaType_AVMEDIA_TYPE_ATTACHMENT
Nb = ffi::AVMediaType_AVMEDIA_TYPE_NB,
} }
} }
enum_from_primitive! { enum_from_primitive! {
#[allow(non_camel_case_types)]
#[doc = " Identify the syntax and semantics of the bitstream."] #[doc = " Identify the syntax and semantics of the bitstream."]
#[doc = " The principle is roughly:"] #[doc = " The principle is roughly:"]
#[doc = " Two decoders with the same ID can decode the same streams."] #[doc = " Two decoders with the same ID can decode the same streams."]
#[doc = " Two encoders with the same ID can encode compatible streams."] #[doc = " Two encoders with the same ID can encode compatible streams."]
#[doc = " There may be slight deviations from the principle due to implementation"] #[doc = " There may be slight deviations from the principle due to implementation"]
#[doc = " details."] #[doc = " details."]
#[allow(non_camel_case_types)]
#[derive(Debug, Copy, Clone, PartialEq)] #[derive(Debug, Copy, Clone, PartialEq)]
#[repr(u32)] #[repr(u32)]
pub enum AVCodecID { pub enum AVCodecID {
...@@ -479,72 +912,6 @@ enum_from_primitive! { ...@@ -479,72 +912,6 @@ enum_from_primitive! {
} }
} }
// TODO: Use proper errors (with struct etc) for this
enum_from_primitive! {
    /// Subset of libav error codes surfaced to callers.
    /// Discriminants mirror the corresponding `AVERROR_*` constants.
    #[derive(Debug, Copy, Clone, PartialEq)]
    #[repr(i32)]
    pub enum AVErrorKind {
        // NOTE(review): `Unknown` is mapped to AVERROR_EXPERIMENTAL — the
        // name/value pairing looks suspicious; confirm this is intentional.
        Unknown = ffi::AVERROR_EXPERIMENTAL,
        // Input changed between calls (AVERROR_INPUT_CHANGED).
        InputChanged = ffi::AVERROR_INPUT_CHANGED,
        // Output changed between calls (AVERROR_OUTPUT_CHANGED).
        OutputChanged = ffi::AVERROR_OUTPUT_CHANGED
    }
}
/// Owning wrapper around an `ffi::AVFormatContext` allocated with
/// `avformat_alloc_context()`; released in `Drop`.
pub struct AVFormatContext {
    // Raw owned pointer; never null after successful construction in `new()`.
    base: *mut ffi::AVFormatContext,
}
// NOTE: the previous `impl<'a>` declared a lifetime parameter that was never
// used by the self type or any predicate (rejected by rustc as E0207).
impl AVFormatContext {
    /// Allocates a new, empty format context.
    ///
    /// # Errors
    /// Fails if `avformat_alloc_context()` returns null (allocation failure).
    pub fn new() -> Result<Self, failure::Error> {
        let base = unsafe { ffi::avformat_alloc_context() };
        if base.is_null() {
            bail!("avformat_alloc_context() failed");
        }
        Ok(AVFormatContext { base })
    }

    // TODO: Just for testing
    /// Exposes the raw pointer for direct ffi calls.
    ///
    /// # Safety
    /// The caller must not free or otherwise invalidate the context.
    pub unsafe fn raw(&mut self) -> *mut ffi::AVFormatContext {
        self.base
    }

    /// Opens the media file at `path` and reads its header into this context.
    ///
    /// # Errors
    /// Fails if `path` contains an interior NUL byte or if
    /// `avformat_open_input()` reports an error.
    pub fn open_input(&mut self, path: &str) -> Result<(), failure::Error> {
        // Bind the CString to a named local so it demonstrably outlives the
        // ffi call; `.as_ptr()` on a temporary is easy to break accidentally.
        let c_path = std::ffi::CString::new(path)
            .map_err(|_| failure::format_err!("Could not convert path to c string"))?;
        match unsafe {
            ffi::avformat_open_input(
                &mut self.base,
                c_path.as_ptr(),
                std::ptr::null_mut(),
                std::ptr::null_mut(),
            )
        } {
            0 => Ok(()),
            _ => bail!("Could not open input"),
        }
    }

    /// Returns wrappers for all streams currently known to this context.
    ///
    /// The returned `AVStream`s borrow from `self` (tracked via `PhantomData`
    /// inside `AVStream`), so the context must outlive them.
    pub fn streams(&self) -> Vec<AVStream> {
        unsafe {
            std::slice::from_raw_parts(
                (*self.base).streams,
                (*self.base).nb_streams as usize,
            )
        }
        .iter()
        .map(|stream| AVStream::new(unsafe { (*stream).as_mut() }.expect("not null"), self))
        .collect()
    }
}
impl Drop for AVFormatContext {
    fn drop(&mut self) {
        // NOTE(review): contexts that were opened with `avformat_open_input`
        // should normally be released with `avformat_close_input`; calling
        // only `avformat_free_context` may leak demuxer state — verify.
        unsafe { ffi::avformat_free_context(self.base) }
    }
}
enum_from_primitive! { enum_from_primitive! {
# [derive(Debug, Copy, Clone, PartialEq)] # [derive(Debug, Copy, Clone, PartialEq)]
# [repr(i32)] # [repr(i32)]
...@@ -565,126 +932,3 @@ enum_from_primitive! { ...@@ -565,126 +932,3 @@ enum_from_primitive! {
All =ffi::AVDiscard_AVDISCARD_ALL All =ffi::AVDiscard_AVDISCARD_ALL
} }
} }
\ No newline at end of file
/// Borrowed view of a single `ffi::AVStream` owned by an `AVFormatContext`.
pub struct AVStream<'a> {
    base: &'a mut ffi::AVStream,
    // Ties this wrapper's lifetime to the owning format context so the
    // stream cannot outlive it.
    phantom: PhantomData<&'a AVFormatContext>,
}
impl<'a> AVStream<'a> {
    /// Wraps a raw stream; the context reference only pins the lifetime.
    fn new(base: &'a mut ffi::AVStream, _: &'a AVFormatContext) -> Self {
        AVStream { base, phantom: PhantomData }
    }

    // TODO: Just for testing
    /// Shared access to the underlying ffi struct.
    pub unsafe fn as_ref(&self) -> &ffi::AVStream {
        self.base
    }

    /// Mutable access to the underlying ffi struct.
    pub unsafe fn as_mut(&mut self) -> &mut ffi::AVStream {
        self.base
    }

    /// Stream index within the parent format context.
    pub fn index(&self) -> i32 {
        self.base.index
    }

    /// Time base (unit of this stream's timestamps) as a fraction of seconds.
    pub fn time_base(&self) -> Fraction {
        Fraction::new(
            self.base.time_base.num as u32,
            self.base.time_base.den as u32,
        )
    }

    /// Converts a timestamp expressed in stream time-base units into a
    /// wall-clock `Duration`.
    // NOTE(review): divides by `time_base.den`; a zero denominator would
    // panic. ffmpeg sets a sane time base for demuxed streams — confirm.
    pub fn timestamp(&self, timestamp: i64) -> std::time::Duration {
        std::time::Duration::from_millis(
            1000 *
                timestamp as u64 *
                self.base.time_base.num as u64 /
                self.base.time_base.den as u64
        )
    }

    /// Total duration of the stream (converted via `timestamp`).
    pub fn duration(&self) -> std::time::Duration {
        self.timestamp(self.base.duration)
    }

    /// Number of frames in the stream, if known to the demuxer.
    pub fn frame_count(&self) -> i64 {
        self.base.nb_frames
    }

    /// Current discard setting, or `None` for an unrecognized raw value.
    pub fn discard(&self) -> Option<AVDiscard> {
        AVDiscard::from_i32(self.base.discard)
    }

    /// Sets which packets/frames the decoder may drop for this stream.
    pub fn set_discard(&mut self, value: AVDiscard) {
        self.base.discard = value as i32;
    }

    /// Sample aspect ratio as stored on the stream.
    pub fn sample_aspect_ratio(&self) -> Fraction {
        Fraction::new(
            self.base.sample_aspect_ratio.num as u32,
            self.base.sample_aspect_ratio.den as u32,
        )
    }

    /// Codec parameters describing this stream's encoded data.
    pub fn codec_parameters(&self) -> AVCodecParameters {
        AVCodecParameters::new(unsafe { self.base.codecpar.as_mut() }.expect("not null"), self)
    }
}
/// Borrowed view of the `ffi::AVCodecParameters` attached to a stream.
pub struct AVCodecParameters<'a> {
    base: &'a mut ffi::AVCodecParameters,
    // Ties this wrapper's lifetime to the stream it was taken from.
    phantom: PhantomData<&'a AVStream<'a>>,
}
impl<'a> AVCodecParameters<'a> {
    /// Wraps raw codec parameters; the stream reference only pins the lifetime.
    fn new(base: &'a mut ffi::AVCodecParameters, _: &'a AVStream) -> Self {
        AVCodecParameters { base, phantom: PhantomData }
    }

    // TODO: Just for testing
    /// Shared access to the underlying ffi struct.
    pub unsafe fn as_ref(&self) -> &ffi::AVCodecParameters {
        self.base
    }

    /// Mutable access to the underlying ffi struct.
    pub unsafe fn as_mut(&mut self) -> &mut ffi::AVCodecParameters {
        self.base
    }

    /// Media type of the stream; unrecognized raw values map to `Unknown`.
    pub fn codec_type(&self) -> AVMediaType {
        AVMediaType::from_i32(self.base.codec_type).unwrap_or(AVMediaType::Unknown)
    }

    /// Codec id, or `None` if the raw value is not a known `AVCodecID`.
    pub fn codec_id(&self) -> Option<AVCodecID> {
        AVCodecID::from_u32(self.base.codec_id)
    }

    /// Looks up a registered decoder for this codec.
    ///
    /// # Panics
    /// Panics if ffmpeg has no decoder registered for `codec_id`.
    pub fn find_decoder(&self) -> AVCodec {
        AVCodec::new(
            unsafe { ffi::avcodec_find_decoder(self.base.codec_id).as_mut() }
                .expect("Decoder not found"),
            self,
        )
    }
}
/// Borrowed view of an `ffi::AVCodec` resolved from codec parameters.
pub struct AVCodec<'a> {
    base: &'a mut ffi::AVCodec,
    // Ties this wrapper's lifetime to the codec parameters it came from.
    phantom: PhantomData<&'a AVCodecParameters<'a>>,
}
impl<'a> AVCodec<'a> {
    /// Wraps a raw codec; the parameters reference only pins the lifetime.
    fn new(base: &'a mut ffi::AVCodec, _: &'a AVCodecParameters) -> Self {
        AVCodec { base, phantom: PhantomData }
    }

    // TODO: Just for testing
    /// Shared access to the underlying ffi struct.
    pub unsafe fn as_ref(&self) -> &ffi::AVCodec {
        self.base
    }

    /// Mutable access to the underlying ffi struct.
    pub unsafe fn as_mut(&mut self) -> &mut ffi::AVCodec {
        self.base
    }

    /// Human-readable codec name as registered with ffmpeg.
    ///
    /// Previously this `unwrap()`ed on the UTF-8 conversion and would panic
    /// for any non-UTF-8 name; invalid bytes are now replaced losslessly
    /// for valid input via `to_string_lossy` (identical result for the
    /// ASCII names ffmpeg actually uses).
    pub fn name(&self) -> String {
        unsafe { std::ffi::CStr::from_ptr(self.base.name) }
            .to_string_lossy()
            .into_owned()
    }
}
pub(crate) mod enums;
pub(crate) mod api;
use ffmpeg_dev::sys as ffi; use ffmpeg_dev::sys as ffi;
use enum_primitive::*;
use std::collections::HashMap;
mod ffmpeg_api; pub(crate) mod ffmpeg_api;
use ffmpeg_api::*; use crate::ffmpeg_api::enums::*;
use crate::ffmpeg_api::api::*;
fn main() -> Result<(), std::io::Error> { fn main() -> Result<(), std::io::Error> {
let mut before = std::time::SystemTime::now(); let mut before = std::time::SystemTime::now();
...@@ -31,36 +30,26 @@ fn main() -> Result<(), std::io::Error> { ...@@ -31,36 +30,26 @@ fn main() -> Result<(), std::io::Error> {
local_codec.name() local_codec.name()
); );
// TODO: HERE BE DRAGONS let mut output_frame = AVFrame::new().unwrap_or_else(|error| {
panic!("Could not create output frame: {:?}", error)
let output_frame = unsafe { });
ffi::av_frame_alloc().as_mut() output_frame.init(160, 90, AVPixelFormat::RGB24).unwrap_or_else(|error| {
}.expect("not null"); panic!("Could not init output frame: {:?}", error)
});
let num_bytes: usize = unsafe {
ffi::avpicture_get_size(ffi::AVPixelFormat_AV_PIX_FMT_RGB24, 160, 90) as usize
};
let output_frame_buffer = unsafe {
(ffi::av_malloc(num_bytes) as *mut u8).as_ref()
}.expect("not null");
unsafe {
ffi::avpicture_fill(
output_frame as *mut ffi::AVFrame as *mut ffi::AVPicture,
output_frame_buffer,
ffi::AVPixelFormat_AV_PIX_FMT_RGB24,
160,
90,
);
}
match codec_parameters.codec_type() { match codec_parameters.codec_type() {
AVMediaType::Video => { AVMediaType::Video => {
// TODO: HERE BE DRAGONS
let avc_ctx: &mut ffi::AVCodecContext = unsafe { let avc_ctx: &mut ffi::AVCodecContext = unsafe {
ffi::avcodec_alloc_context3(local_codec.as_ref()).as_mut() ffi::avcodec_alloc_context3(local_codec.as_ref()).as_mut()
}.expect("not null"); }.expect("not null");
avc_ctx.skip_loop_filter = ffi::AVDiscard_AVDISCARD_NONKEY;
avc_ctx.skip_idct = ffi::AVDiscard_AVDISCARD_NONKEY;
avc_ctx.skip_frame = ffi::AVDiscard_AVDISCARD_NONKEY;
unsafe { unsafe {
ffi::avcodec_parameters_to_context(avc_ctx, codec_parameters.as_ref()); ffi::avcodec_parameters_to_context(avc_ctx, codec_parameters.as_ref());
ffi::avcodec_open2(avc_ctx, local_codec.as_ref(), std::ptr::null_mut()); ffi::avcodec_open2(avc_ctx, local_codec.as_ref(), std::ptr::null_mut());
...@@ -70,49 +59,39 @@ fn main() -> Result<(), std::io::Error> { ...@@ -70,49 +59,39 @@ fn main() -> Result<(), std::io::Error> {
ffi::av_packet_alloc().as_mut() ffi::av_packet_alloc().as_mut()
}.expect("not null"); }.expect("not null");
let frame: &mut ffi::AVFrame = unsafe { let mut frame = AVFrame::new().unwrap_or_else(|error| {
ffi::av_frame_alloc().as_mut() panic!("Could not create input frame: {:?}", error)
}.expect("not null"); });
avc_ctx.skip_loop_filter = ffi::AVDiscard_AVDISCARD_NONKEY;
avc_ctx.skip_idct = ffi::AVDiscard_AVDISCARD_NONKEY;
avc_ctx.skip_frame = ffi::AVDiscard_AVDISCARD_NONKEY;
let mut i = 0; let mut i = 0;
println!("Time: {:#?}", before.elapsed().unwrap()); println!("Time: {:#?}", before.elapsed().unwrap());
before = std::time::SystemTime::now(); before = std::time::SystemTime::now();
let mut sws_context: *mut ffi::SwsContext = std::ptr::null_mut();
while unsafe { ffi::av_read_frame(avformat_context.raw(), packet) } >= 0 && i < 10 { while unsafe { ffi::av_read_frame(avformat_context.raw(), packet) } >= 0 && i < 10 {
if packet.stream_index == stream.index() { if packet.stream_index == stream.index() {
unsafe { unsafe {
ffi::avcodec_send_packet(avc_ctx, packet); ffi::avcodec_send_packet(avc_ctx, packet);
} }
while unsafe { ffi::avcodec_receive_frame(avc_ctx, frame) } >= 0 { while unsafe { ffi::avcodec_receive_frame(avc_ctx, frame.as_mut()) } >= 0 {
let key_frame = frame.key_frame != 0;
let frame_index = frame.coded_picture_number;
println!( println!(
"Frame {}: {:?} @ {}", "Frame {}: {:?} @ {}",
frame_index, frame.coded_picture_number(),
stream.timestamp(frame.pts as i64), stream.timestamp(frame.pts()),
key_frame frame.key_frame()
); );
println!("Reading Time: {:#?}", before.elapsed().unwrap()); println!("Reading Time: {:#?}", before.elapsed().unwrap());
before = std::time::SystemTime::now(); before = std::time::SystemTime::now();
if sws_context.is_null() {
/* sws_context = unsafe {
if frame.width == last_width && frame.height == last_height && (frame.format as AVPixelFormat) == last_format {
}
*/
let sws_context: &mut ffi::SwsContext = unsafe {
ffi::sws_getContext( ffi::sws_getContext(
frame.width, frame.width(),
frame.height, frame.height(),
frame.format as ffi::AVPixelFormat, frame.format() as ffi::AVPixelFormat,
160, 160,
90, 90,
ffi::AVPixelFormat_AV_PIX_FMT_RGB24, ffi::AVPixelFormat_AV_PIX_FMT_RGB24,
...@@ -122,31 +101,30 @@ fn main() -> Result<(), std::io::Error> { ...@@ -122,31 +101,30 @@ fn main() -> Result<(), std::io::Error> {
std::ptr::null(), std::ptr::null(),
).as_mut() ).as_mut()
}.expect("not null"); }.expect("not null");
}
let success = unsafe { let success = unsafe {
ffi::sws_scale( ffi::sws_scale(
sws_context, sws_context,
frame.data.as_ptr() as *const *const u8, frame.data_ptr(),
&frame.linesize[0], frame.linesize().as_ptr(),
0, 0,
frame.height, frame.height(),
&output_frame.data[0], output_frame.data_mut_ptr(),
&output_frame.linesize[0], output_frame.linesize().as_ptr(),
) )
}; };
println!("success: {}, size: {}", success, num_bytes); println!("success: {}", success);
println!("Processing Time: {:#?}", before.elapsed().unwrap()); println!("Processing Time: {:#?}", before.elapsed().unwrap());
before = std::time::SystemTime::now(); before = std::time::SystemTime::now();
if success > 0 { if success > 0 {
image::save_buffer( image::save_buffer(
format!("/home/janne/Workspace/justflix/data/test/image_{}.png", i), format!("/home/janne/Workspace/justflix/data/test/image_{}.png", i),
unsafe { output_frame.data(0),
std::slice::from_raw_parts(output_frame.data[0], num_bytes) output_frame.width() as u32,
}, output_frame.height() as u32,
160,
90,
image::ColorType::Rgb8, image::ColorType::Rgb8,
).unwrap(); ).unwrap();
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment