gstreamer_video/auto/
video_decoder.rs

1// This file was generated by gir (https://github.com/gtk-rs/gir)
2// from gir-files (https://github.com/gtk-rs/gir-files)
3// from gst-gir-files (https://gitlab.freedesktop.org/gstreamer/gir-files-rs.git)
4// DO NOT EDIT
5
6#[cfg(feature = "v1_20")]
7#[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
8use crate::VideoDecoderRequestSyncPointFlags;
9use crate::{ffi, VideoCodecFrame};
10#[cfg(feature = "v1_18")]
11#[cfg_attr(docsrs, doc(cfg(feature = "v1_18")))]
12use glib::signal::{connect_raw, SignalHandlerId};
13use glib::{prelude::*, translate::*};
14#[cfg(feature = "v1_18")]
15#[cfg_attr(docsrs, doc(cfg(feature = "v1_18")))]
16use std::boxed::Box as Box_;
17
glib::wrapper! {
    /// This base class is for video decoders turning encoded data into raw video
    /// frames.
    ///
    /// The GstVideoDecoder base class and derived subclasses should cooperate as
    /// follows:
    ///
    /// ## Configuration
    ///
    ///  * Initially, GstVideoDecoder calls `start` when the decoder element
    ///  is activated, which allows the subclass to perform any global setup.
    ///
    ///  * GstVideoDecoder calls `set_format` to inform the subclass of caps
    ///  describing input video data that it is about to receive, including
    ///  possibly configuration data.
    ///  While unlikely, it might be called more than once, if changing input
    ///  parameters require reconfiguration.
    ///
    ///  * Incoming data buffers are processed as needed, described in Data
    ///  Processing below.
    ///
    ///  * GstVideoDecoder calls `stop` at end of all processing.
    ///
    /// ## Data processing
    ///
    ///  * The base class gathers input data, and optionally allows subclass
    ///  to parse this into subsequently manageable chunks, typically
    ///  corresponding to and referred to as 'frames'.
    ///
    ///  * Each input frame is provided in turn to the subclass' `handle_frame`
    ///  callback.
    ///  * When the subclass enables the subframe mode with `gst_video_decoder_set_subframe_mode`,
    ///  the base class will provide to the subclass the same input frame with
    ///  different input buffers to the subclass `handle_frame`
    ///  callback. During this call, the subclass needs to take
    ///  ownership of the input_buffer as [`VideoCodecFrame`][crate::VideoCodecFrame]
    ///  will have been changed before the next subframe buffer is received.
    ///  The subclass will call `gst_video_decoder_have_last_subframe`
    ///  when a new input frame can be created by the base class.
    ///  Every subframe will share the same [`VideoCodecFrame`][crate::VideoCodecFrame]
    ///  to write the decoding result. The subclass is responsible to protect
    ///  its access.
    ///
    ///  * If codec processing results in decoded data, the subclass should call
    ///  [`VideoDecoderExt::finish_frame()`][crate::prelude::VideoDecoderExt::finish_frame()] to have decoded data pushed
    ///  downstream. In subframe mode
    ///  the subclass should call [`VideoDecoderExt::finish_subframe()`][crate::prelude::VideoDecoderExt::finish_subframe()] until the
    ///  last subframe where it should call [`VideoDecoderExt::finish_frame()`][crate::prelude::VideoDecoderExt::finish_frame()].
    ///  The subclass can detect the last subframe using GST_VIDEO_BUFFER_FLAG_MARKER
    ///  on buffers or using its own logic to collect the subframes.
    ///  In case of decoding failure, the subclass must call
    ///  [`VideoDecoderExt::drop_frame()`][crate::prelude::VideoDecoderExt::drop_frame()] or [`VideoDecoderExt::drop_subframe()`][crate::prelude::VideoDecoderExt::drop_subframe()],
    ///  to allow the base class to do timestamp and offset tracking, and possibly
    ///  to requeue the frame for a later attempt in the case of reverse playback.
    ///
    /// ## Shutdown phase
    ///
    ///  * The GstVideoDecoder class calls `stop` to inform the subclass that data
    ///  parsing will be stopped.
    ///
    /// ## Additional Notes
    ///
    ///  * Seeking/Flushing
    ///
    ///  * When the pipeline is seeked or otherwise flushed, the subclass is
    ///  informed via a call to its `reset` callback, with the hard parameter
    ///  set to true. This indicates the subclass should drop any internal data
    ///  queues and timestamps and prepare for a fresh set of buffers to arrive
    ///  for parsing and decoding.
    ///
    ///  * End Of Stream
    ///
    ///  * At end-of-stream, the subclass `parse` function may be called some final
    ///  times with the at_eos parameter set to true, indicating that the element
    ///  should not expect any more data to be arriving, and it should parse any
    ///  remaining frames and call [`VideoDecoderExt::have_frame()`][crate::prelude::VideoDecoderExt::have_frame()] if possible.
    ///
    /// The subclass is responsible for providing pad template caps for
    /// source and sink pads. The pads need to be named "sink" and "src". It also
    /// needs to provide information about the output caps, when they are known.
    /// This may be when the base class calls the subclass' `set_format` function,
    /// though it might be during decoding, before calling
    /// [`VideoDecoderExt::finish_frame()`][crate::prelude::VideoDecoderExt::finish_frame()]. This is done via
    /// [`VideoDecoderExtManual::set_output_state()`][crate::prelude::VideoDecoderExtManual::set_output_state()]
    ///
    /// The subclass is also responsible for providing (presentation) timestamps
    /// (likely based on corresponding input ones). If that is not applicable
    /// or possible, the base class provides limited framerate based interpolation.
    ///
    /// Similarly, the base class provides some limited (legacy) seeking support
    /// if specifically requested by the subclass, as full-fledged support
    /// should rather be left to upstream demuxer, parser or alike. This simple
    /// approach caters for seeking and duration reporting using estimated input
    /// bitrates. To enable it, a subclass should call
    /// [`VideoDecoderExt::set_estimate_rate()`][crate::prelude::VideoDecoderExt::set_estimate_rate()] to enable handling of incoming
    /// byte-streams.
    ///
    /// The base class provides some support for reverse playback, in particular
    /// in case incoming data is not packetized or upstream does not provide
    /// fragments on keyframe boundaries. However, the subclass should then be
    /// prepared for the parsing and frame processing stage to occur separately
    /// (in normal forward processing, the latter immediately follows the former).
    /// The subclass also needs to ensure the parsing stage properly marks
    /// keyframes, unless it knows the upstream elements will do so properly for
    /// incoming data.
    ///
    /// The bare minimum that a functional subclass needs to implement is:
    ///
    ///  * Provide pad templates
    ///  * Inform the base class of output caps via
    ///  [`VideoDecoderExtManual::set_output_state()`][crate::prelude::VideoDecoderExtManual::set_output_state()]
    ///
    ///  * Parse input data, if it is not considered packetized from upstream
    ///  Data will be provided to `parse` which should invoke
    ///  [`VideoDecoderExt::add_to_frame()`][crate::prelude::VideoDecoderExt::add_to_frame()] and [`VideoDecoderExt::have_frame()`][crate::prelude::VideoDecoderExt::have_frame()] to
    ///  separate the data belonging to each video frame.
    ///
    ///  * Accept data in `handle_frame` and provide decoded results to
    ///  [`VideoDecoderExt::finish_frame()`][crate::prelude::VideoDecoderExt::finish_frame()], or call [`VideoDecoderExt::drop_frame()`][crate::prelude::VideoDecoderExt::drop_frame()].
    ///
    /// This is an Abstract Base Class, you cannot instantiate it.
    ///
    /// ## Properties
    ///
    ///
    /// #### `automatic-request-sync-point-flags`
    ///  GstVideoDecoderRequestSyncPointFlags to use for the automatically
    /// requested sync points if `automatic-request-sync-points` is enabled.
    ///
    /// Readable | Writeable
    ///
    ///
    /// #### `automatic-request-sync-points`
    ///  If set to [`true`] the decoder will automatically request sync points when
    /// it seems like a good idea, e.g. if the first frames are not key frames or
    /// if packet loss was reported by upstream.
    ///
    /// Readable | Writeable
    ///
    ///
    /// #### `discard-corrupted-frames`
    ///  If set to [`true`] the decoder will discard frames that are marked as
    /// corrupted instead of outputting them.
    ///
    /// Readable | Writeable
    ///
    ///
    /// #### `max-errors`
    ///  Maximum number of tolerated consecutive decode errors. See
    /// [`VideoDecoderExt::set_max_errors()`][crate::prelude::VideoDecoderExt::set_max_errors()] for more details.
    ///
    /// Readable | Writeable
    ///
    ///
    /// #### `min-force-key-unit-interval`
    ///  Minimum interval between force-key-unit events sent upstream by the
    /// decoder. Setting this to 0 will cause every event to be handled, setting
    /// this to `GST_CLOCK_TIME_NONE` will cause every event to be ignored.
    ///
    /// See `gst_video_event_new_upstream_force_key_unit()` for more details about
    /// force-key-unit events.
    ///
    /// Readable | Writeable
    ///
    ///
    /// #### `qos`
    ///  If set to [`true`] the decoder will handle QoS events received
    /// from downstream elements.
    /// This includes dropping output frames which are detected as late
    /// using the metrics reported by those events.
    ///
    /// Readable | Writeable
    /// <details><summary><h4>Object</h4></summary>
    ///
    ///
    /// #### `name`
    ///  Readable | Writeable | Construct
    ///
    ///
    /// #### `parent`
    ///  The parent of the object. Please note, that when changing the 'parent'
    /// property, we don't emit [`notify`][struct@crate::glib::Object#notify] and [`deep-notify`][struct@crate::gst::Object#deep-notify]
    /// signals due to locking issues. In some cases one can use
    /// `GstBin::element-added` or `GstBin::element-removed` signals on the parent to
    /// achieve a similar effect.
    ///
    /// Readable | Writeable
    /// </details>
    ///
    /// # Implements
    ///
    /// [`VideoDecoderExt`][trait@crate::prelude::VideoDecoderExt], [`trait@gst::prelude::ElementExt`], [`trait@gst::prelude::ObjectExt`], [`trait@glib::ObjectExt`], [`VideoDecoderExtManual`][trait@crate::prelude::VideoDecoderExtManual]
    #[doc(alias = "GstVideoDecoder")]
    pub struct VideoDecoder(Object<ffi::GstVideoDecoder, ffi::GstVideoDecoderClass>) @extends gst::Element, gst::Object;

    match fn {
        type_ => || ffi::gst_video_decoder_get_type(),
    }
}
217
impl VideoDecoder {
    // Convenience constant for APIs taking an `Option<&impl IsA<VideoDecoder>>`:
    // pass `VideoDecoder::NONE` instead of spelling out the `None::<&VideoDecoder>`
    // turbofish at the call site.
    pub const NONE: Option<&'static VideoDecoder> = None;
}
221
// SAFETY: generated by gir from the GStreamer annotations, which mark
// GstVideoDecoder as usable from multiple threads (GObject refcounting is
// atomic and the wrapped C API documents its MT-safety). NOTE(review):
// soundness rests on those upstream annotations — confirm against gir-files
// if this type is regenerated.
unsafe impl Send for VideoDecoder {}
unsafe impl Sync for VideoDecoder {}
224
225/// Trait containing all [`struct@VideoDecoder`] methods.
226///
227/// # Implementors
228///
229/// [`VideoDecoder`][struct@crate::VideoDecoder]
230pub trait VideoDecoderExt: IsA<VideoDecoder> + 'static {
231    /// Removes next `n_bytes` of input data and adds it to currently parsed frame.
232    /// ## `n_bytes`
233    /// the number of bytes to add
234    #[doc(alias = "gst_video_decoder_add_to_frame")]
235    fn add_to_frame(&self, n_bytes: i32) {
236        unsafe {
237            ffi::gst_video_decoder_add_to_frame(self.as_ref().to_glib_none().0, n_bytes);
238        }
239    }
240
241    /// Helper function that allocates a buffer to hold a video frame for `self`'s
242    /// current [`VideoCodecState`][crate::VideoCodecState].
243    ///
244    /// You should use [`VideoDecoderExtManual::allocate_output_frame()`][crate::prelude::VideoDecoderExtManual::allocate_output_frame()] instead of this
245    /// function, if possible at all.
246    ///
247    /// # Returns
248    ///
249    /// allocated buffer, or NULL if no buffer could be
250    ///  allocated (e.g. when downstream is flushing or shutting down)
251    #[doc(alias = "gst_video_decoder_allocate_output_buffer")]
252    fn allocate_output_buffer(&self) -> Result<gst::Buffer, glib::BoolError> {
253        unsafe {
254            Option::<_>::from_glib_full(ffi::gst_video_decoder_allocate_output_buffer(
255                self.as_ref().to_glib_none().0,
256            ))
257            .ok_or_else(|| glib::bool_error!("Failed to allocate output buffer"))
258        }
259    }
260
261    /// Similar to [`finish_frame()`][Self::finish_frame()], but drops `frame` in any
262    /// case and posts a QoS message with the frame's details on the bus.
263    /// In any case, the frame is considered finished and released.
264    /// ## `frame`
265    /// the [`VideoCodecFrame`][crate::VideoCodecFrame] to drop
266    ///
267    /// # Returns
268    ///
269    /// a [`gst::FlowReturn`][crate::gst::FlowReturn], usually GST_FLOW_OK.
270    #[doc(alias = "gst_video_decoder_drop_frame")]
271    fn drop_frame(&self, frame: VideoCodecFrame) -> Result<gst::FlowSuccess, gst::FlowError> {
272        unsafe {
273            try_from_glib(ffi::gst_video_decoder_drop_frame(
274                self.as_ref().to_glib_none().0,
275                frame.into_glib_ptr(),
276            ))
277        }
278    }
279
280    /// Drops input data.
281    /// The frame is not considered finished until the whole frame
282    /// is finished or dropped by the subclass.
283    /// ## `frame`
284    /// the [`VideoCodecFrame`][crate::VideoCodecFrame]
285    ///
286    /// # Returns
287    ///
288    /// a [`gst::FlowReturn`][crate::gst::FlowReturn], usually GST_FLOW_OK.
289    #[cfg(feature = "v1_20")]
290    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
291    #[doc(alias = "gst_video_decoder_drop_subframe")]
292    fn drop_subframe(&self, frame: VideoCodecFrame) -> Result<gst::FlowSuccess, gst::FlowError> {
293        unsafe {
294            try_from_glib(ffi::gst_video_decoder_drop_subframe(
295                self.as_ref().to_glib_none().0,
296                frame.into_glib_ptr(),
297            ))
298        }
299    }
300
301    /// `frame` should have a valid decoded data buffer, whose metadata fields
302    /// are then appropriately set according to frame data and pushed downstream.
303    /// If no output data is provided, `frame` is considered skipped.
304    /// In any case, the frame is considered finished and released.
305    ///
306    /// After calling this function the output buffer of the frame is to be
307    /// considered read-only. This function will also change the metadata
308    /// of the buffer.
309    /// ## `frame`
310    /// a decoded [`VideoCodecFrame`][crate::VideoCodecFrame]
311    ///
312    /// # Returns
313    ///
314    /// a [`gst::FlowReturn`][crate::gst::FlowReturn] resulting from sending data downstream
315    #[doc(alias = "gst_video_decoder_finish_frame")]
316    fn finish_frame(&self, frame: VideoCodecFrame) -> Result<gst::FlowSuccess, gst::FlowError> {
317        unsafe {
318            try_from_glib(ffi::gst_video_decoder_finish_frame(
319                self.as_ref().to_glib_none().0,
320                frame.into_glib_ptr(),
321            ))
322        }
323    }
324
325    /// Indicate that a subframe has been finished to be decoded
326    /// by the subclass. This method should be called for all subframes
327    /// except the last subframe where [`finish_frame()`][Self::finish_frame()]
328    /// should be called instead.
329    /// ## `frame`
330    /// the [`VideoCodecFrame`][crate::VideoCodecFrame]
331    ///
332    /// # Returns
333    ///
334    /// a [`gst::FlowReturn`][crate::gst::FlowReturn], usually GST_FLOW_OK.
335    #[cfg(feature = "v1_20")]
336    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
337    #[doc(alias = "gst_video_decoder_finish_subframe")]
338    fn finish_subframe(&self, frame: VideoCodecFrame) -> Result<gst::FlowSuccess, gst::FlowError> {
339        unsafe {
340            try_from_glib(ffi::gst_video_decoder_finish_subframe(
341                self.as_ref().to_glib_none().0,
342                frame.into_glib_ptr(),
343            ))
344        }
345    }
346
347    ///
348    /// # Returns
349    ///
350    /// the instance of the [`gst::BufferPool`][crate::gst::BufferPool] used
351    /// by the decoder; free it after use it
352    #[doc(alias = "gst_video_decoder_get_buffer_pool")]
353    #[doc(alias = "get_buffer_pool")]
354    fn buffer_pool(&self) -> Option<gst::BufferPool> {
355        unsafe {
356            from_glib_full(ffi::gst_video_decoder_get_buffer_pool(
357                self.as_ref().to_glib_none().0,
358            ))
359        }
360    }
361
362    ///
363    /// # Returns
364    ///
365    /// currently configured byte to time conversion setting
366    #[doc(alias = "gst_video_decoder_get_estimate_rate")]
367    #[doc(alias = "get_estimate_rate")]
368    fn estimate_rate(&self) -> i32 {
369        unsafe { ffi::gst_video_decoder_get_estimate_rate(self.as_ref().to_glib_none().0) }
370    }
371
372    /// Determines maximum possible decoding time for `frame` that will
373    /// allow it to decode and arrive in time (as determined by QoS events).
374    /// In particular, a negative result means decoding in time is no longer possible
375    /// and should therefore occur as soon/skippy as possible.
376    /// ## `frame`
377    /// a [`VideoCodecFrame`][crate::VideoCodecFrame]
378    ///
379    /// # Returns
380    ///
381    /// max decoding time.
382    #[doc(alias = "gst_video_decoder_get_max_decode_time")]
383    #[doc(alias = "get_max_decode_time")]
384    fn max_decode_time(&self, frame: &VideoCodecFrame) -> gst::ClockTimeDiff {
385        unsafe {
386            ffi::gst_video_decoder_get_max_decode_time(
387                self.as_ref().to_glib_none().0,
388                frame.to_glib_none().0,
389            )
390        }
391    }
392
393    ///
394    /// # Returns
395    ///
396    /// currently configured decoder tolerated error count.
397    #[doc(alias = "gst_video_decoder_get_max_errors")]
398    #[doc(alias = "get_max_errors")]
399    #[doc(alias = "max-errors")]
400    fn max_errors(&self) -> i32 {
401        unsafe { ffi::gst_video_decoder_get_max_errors(self.as_ref().to_glib_none().0) }
402    }
403
404    /// Queries decoder required format handling.
405    ///
406    /// # Returns
407    ///
408    /// [`true`] if required format handling is enabled.
409    #[doc(alias = "gst_video_decoder_get_needs_format")]
410    #[doc(alias = "get_needs_format")]
411    fn needs_format(&self) -> bool {
412        unsafe {
413            from_glib(ffi::gst_video_decoder_get_needs_format(
414                self.as_ref().to_glib_none().0,
415            ))
416        }
417    }
418
419    /// Queries if the decoder requires a sync point before it starts outputting
420    /// data in the beginning.
421    ///
422    /// # Returns
423    ///
424    /// [`true`] if a sync point is required in the beginning.
425    #[cfg(feature = "v1_20")]
426    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
427    #[doc(alias = "gst_video_decoder_get_needs_sync_point")]
428    #[doc(alias = "get_needs_sync_point")]
429    fn needs_sync_point(&self) -> bool {
430        unsafe {
431            from_glib(ffi::gst_video_decoder_get_needs_sync_point(
432                self.as_ref().to_glib_none().0,
433            ))
434        }
435    }
436
437    /// Queries whether input data is considered packetized or not by the
438    /// base class.
439    ///
440    /// # Returns
441    ///
442    /// TRUE if input data is considered packetized.
443    #[doc(alias = "gst_video_decoder_get_packetized")]
444    #[doc(alias = "get_packetized")]
445    fn is_packetized(&self) -> bool {
446        unsafe {
447            from_glib(ffi::gst_video_decoder_get_packetized(
448                self.as_ref().to_glib_none().0,
449            ))
450        }
451    }
452
453    /// Returns the number of bytes previously added to the current frame
454    /// by calling [`add_to_frame()`][Self::add_to_frame()].
455    ///
456    /// # Returns
457    ///
458    /// The number of bytes pending for the current frame
459    #[doc(alias = "gst_video_decoder_get_pending_frame_size")]
460    #[doc(alias = "get_pending_frame_size")]
461    fn pending_frame_size(&self) -> usize {
462        unsafe { ffi::gst_video_decoder_get_pending_frame_size(self.as_ref().to_glib_none().0) }
463    }
464
465    ///
466    /// # Returns
467    ///
468    /// The current QoS proportion.
469    #[doc(alias = "gst_video_decoder_get_qos_proportion")]
470    #[doc(alias = "get_qos_proportion")]
471    fn qos_proportion(&self) -> f64 {
472        unsafe { ffi::gst_video_decoder_get_qos_proportion(self.as_ref().to_glib_none().0) }
473    }
474
475    /// Queries whether input data is considered as subframes or not by the
476    /// base class. If FALSE, each input buffer will be considered as a full
477    /// frame.
478    ///
479    /// # Returns
480    ///
481    /// TRUE if input data is considered as sub frames.
482    #[cfg(feature = "v1_20")]
483    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
484    #[doc(alias = "gst_video_decoder_get_subframe_mode")]
485    #[doc(alias = "get_subframe_mode")]
486    fn is_subframe_mode(&self) -> bool {
487        unsafe {
488            from_glib(ffi::gst_video_decoder_get_subframe_mode(
489                self.as_ref().to_glib_none().0,
490            ))
491        }
492    }
493
494    /// Gathers all data collected for currently parsed frame, gathers corresponding
495    /// metadata and passes it along for further processing, i.e. `handle_frame`.
496    ///
497    /// # Returns
498    ///
499    /// a [`gst::FlowReturn`][crate::gst::FlowReturn]
500    #[doc(alias = "gst_video_decoder_have_frame")]
501    fn have_frame(&self) -> Result<gst::FlowSuccess, gst::FlowError> {
502        unsafe {
503            try_from_glib(ffi::gst_video_decoder_have_frame(
504                self.as_ref().to_glib_none().0,
505            ))
506        }
507    }
508
509    /// Indicates that the last subframe has been processed by the decoder
510    /// in `frame`. This will release the current frame in video decoder
511    /// allowing to receive new frames from upstream elements. This method
512    /// must be called in the subclass `handle_frame` callback.
513    /// ## `frame`
514    /// the [`VideoCodecFrame`][crate::VideoCodecFrame] to update
515    ///
516    /// # Returns
517    ///
518    /// a [`gst::FlowReturn`][crate::gst::FlowReturn], usually GST_FLOW_OK.
519    #[cfg(feature = "v1_20")]
520    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
521    #[doc(alias = "gst_video_decoder_have_last_subframe")]
522    fn have_last_subframe(
523        &self,
524        frame: &VideoCodecFrame,
525    ) -> Result<gst::FlowSuccess, gst::FlowError> {
526        unsafe {
527            try_from_glib(ffi::gst_video_decoder_have_last_subframe(
528                self.as_ref().to_glib_none().0,
529                frame.to_glib_none().0,
530            ))
531        }
532    }
533
    /// Sets the video decoder tags and how they should be merged with any
    /// upstream stream tags. This will override any tags previously-set
    /// with `gst_video_decoder_merge_tags()`.
    ///
    /// (The generated docs said "audio decoder" / `gst_audio_decoder_merge_tags()`;
    /// that is a copy-paste error in the upstream C documentation — this is the
    /// video decoder variant.)
    ///
    /// Note that this is provided for convenience, and the subclass is
    /// not required to use this and can still do tag handling on its own.
    ///
    /// MT safe.
    /// ## `tags`
    /// a [`gst::TagList`][crate::gst::TagList] to merge, or NULL to unset
    ///  previously-set tags
    /// ## `mode`
    /// the [`gst::TagMergeMode`][crate::gst::TagMergeMode] to use, usually [`gst::TagMergeMode::Replace`][crate::gst::TagMergeMode::Replace]
    #[doc(alias = "gst_video_decoder_merge_tags")]
    fn merge_tags(&self, tags: Option<&gst::TagList>, mode: gst::TagMergeMode) {
        unsafe {
            // SAFETY: all pointers come from to_glib_none() borrows of live
            // Rust wrappers (None maps to NULL, which the C API accepts).
            ffi::gst_video_decoder_merge_tags(
                self.as_ref().to_glib_none().0,
                tags.to_glib_none().0,
                mode.into_glib(),
            );
        }
    }
557
558    /// Returns caps that express `caps` (or sink template caps if `caps` == NULL)
559    /// restricted to resolution/format/... combinations supported by downstream
560    /// elements.
561    /// ## `caps`
562    /// initial caps
563    /// ## `filter`
564    /// filter caps
565    ///
566    /// # Returns
567    ///
568    /// a [`gst::Caps`][crate::gst::Caps] owned by caller
569    #[doc(alias = "gst_video_decoder_proxy_getcaps")]
570    fn proxy_getcaps(&self, caps: Option<&gst::Caps>, filter: Option<&gst::Caps>) -> gst::Caps {
571        unsafe {
572            from_glib_full(ffi::gst_video_decoder_proxy_getcaps(
573                self.as_ref().to_glib_none().0,
574                caps.to_glib_none().0,
575                filter.to_glib_none().0,
576            ))
577        }
578    }
579
580    /// Similar to [`drop_frame()`][Self::drop_frame()], but simply releases `frame`
581    /// without any processing other than removing it from list of pending frames,
582    /// after which it is considered finished and released.
583    /// ## `frame`
584    /// the [`VideoCodecFrame`][crate::VideoCodecFrame] to release
585    #[doc(alias = "gst_video_decoder_release_frame")]
586    fn release_frame(&self, frame: VideoCodecFrame) {
587        unsafe {
588            ffi::gst_video_decoder_release_frame(
589                self.as_ref().to_glib_none().0,
590                frame.into_glib_ptr(),
591            );
592        }
593    }
594
    /// Allows the [`VideoDecoder`][crate::VideoDecoder] subclass to request from the base class that
    /// a new sync should be requested from upstream, and that `frame` was the frame
    /// when the subclass noticed that a new sync point is required. A reason for
    /// the subclass to do this could be missing reference frames, for example.
    ///
    /// The base class will then request a new sync point from upstream as long as
    /// the time that passed since the last one is exceeding
    /// [`min-force-key-unit-interval`][struct@crate::VideoDecoder#min-force-key-unit-interval].
    ///
    /// The subclass can signal via `flags` how the frames until the next sync point
    /// should be handled:
    ///
    ///  * If [`VideoDecoderRequestSyncPointFlags::DISCARD_INPUT`][crate::VideoDecoderRequestSyncPointFlags::DISCARD_INPUT] is selected then
    ///  all following input frames until the next sync point are discarded.
    ///  This can be useful if the lack of a sync point will prevent all further
    ///  decoding and the decoder implementation is not very robust in handling
    ///  missing references frames.
    ///  * If [`VideoDecoderRequestSyncPointFlags::CORRUPT_OUTPUT`][crate::VideoDecoderRequestSyncPointFlags::CORRUPT_OUTPUT] is selected
    ///  then all output frames following `frame` are marked as corrupted via
    ///  `GST_BUFFER_FLAG_CORRUPTED`. Corrupted frames can be automatically
    ///  dropped by the base class, see [`discard-corrupted-frames`][struct@crate::VideoDecoder#discard-corrupted-frames].
    ///  Subclasses can manually mark frames as corrupted via [`VideoCodecFrameFlags::CORRUPTED`][crate::VideoCodecFrameFlags::CORRUPTED]
    ///  before calling [`finish_frame()`][Self::finish_frame()].
    /// ## `frame`
    /// a [`VideoCodecFrame`][crate::VideoCodecFrame]
    /// ## `flags`
    /// [`VideoDecoderRequestSyncPointFlags`][crate::VideoDecoderRequestSyncPointFlags]
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "gst_video_decoder_request_sync_point")]
    fn request_sync_point(
        &self,
        frame: &VideoCodecFrame,
        flags: VideoDecoderRequestSyncPointFlags,
    ) {
        unsafe {
            // SAFETY: both pointers are to_glib_none() borrows of live Rust
            // wrappers, valid for the duration of the call; `frame` is not consumed.
            ffi::gst_video_decoder_request_sync_point(
                self.as_ref().to_glib_none().0,
                frame.to_glib_none().0,
                flags.into_glib(),
            );
        }
    }
638
639    /// Allows baseclass to perform byte to time estimated conversion.
640    /// ## `enabled`
641    /// whether to enable byte to time conversion
642    #[doc(alias = "gst_video_decoder_set_estimate_rate")]
643    fn set_estimate_rate(&self, enabled: bool) {
644        unsafe {
645            ffi::gst_video_decoder_set_estimate_rate(
646                self.as_ref().to_glib_none().0,
647                enabled.into_glib(),
648            );
649        }
650    }
651
652    /// Sets numbers of tolerated decoder errors, where a tolerated one is then only
653    /// warned about, but more than tolerated will lead to fatal error. You can set
654    /// -1 for never returning fatal errors. Default is set to
655    /// GST_VIDEO_DECODER_MAX_ERRORS.
656    ///
657    /// The '-1' option was added in 1.4
658    /// ## `num`
659    /// max tolerated errors
660    #[doc(alias = "gst_video_decoder_set_max_errors")]
661    #[doc(alias = "max-errors")]
662    fn set_max_errors(&self, num: i32) {
663        unsafe {
664            ffi::gst_video_decoder_set_max_errors(self.as_ref().to_glib_none().0, num);
665        }
666    }
667
668    /// Configures decoder format needs. If enabled, subclass needs to be
669    /// negotiated with format caps before it can process any data. It will then
670    /// never be handed any data before it has been configured.
671    /// Otherwise, it might be handed data without having been configured and
672    /// is then expected being able to do so either by default
673    /// or based on the input data.
674    /// ## `enabled`
675    /// new state
676    #[doc(alias = "gst_video_decoder_set_needs_format")]
677    fn set_needs_format(&self, enabled: bool) {
678        unsafe {
679            ffi::gst_video_decoder_set_needs_format(
680                self.as_ref().to_glib_none().0,
681                enabled.into_glib(),
682            );
683        }
684    }
685
686    /// Configures whether the decoder requires a sync point before it starts
687    /// outputting data in the beginning. If enabled, the base class will discard
688    /// all non-sync point frames in the beginning and after a flush and does not
689    /// pass it to the subclass.
690    ///
691    /// If the first frame is not a sync point, the base class will request a sync
692    /// point via the force-key-unit event.
693    /// ## `enabled`
694    /// new state
695    #[cfg(feature = "v1_20")]
696    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
697    #[doc(alias = "gst_video_decoder_set_needs_sync_point")]
698    fn set_needs_sync_point(&self, enabled: bool) {
699        unsafe {
700            ffi::gst_video_decoder_set_needs_sync_point(
701                self.as_ref().to_glib_none().0,
702                enabled.into_glib(),
703            );
704        }
705    }
706
707    /// Allows baseclass to consider input data as packetized or not. If the
708    /// input is packetized, then the `parse` method will not be called.
709    /// ## `packetized`
710    /// whether the input data should be considered as packetized.
711    #[doc(alias = "gst_video_decoder_set_packetized")]
712    fn set_packetized(&self, packetized: bool) {
713        unsafe {
714            ffi::gst_video_decoder_set_packetized(
715                self.as_ref().to_glib_none().0,
716                packetized.into_glib(),
717            );
718        }
719    }
720
721    /// If this is set to TRUE, it informs the base class that the subclass
722    /// can receive the data at a granularity lower than one frame.
723    ///
724    /// Note that in this mode, the subclass has two options. It can either
725    /// require the presence of a GST_VIDEO_BUFFER_FLAG_MARKER to mark the
726    /// end of a frame. Or it can operate in such a way that it will decode
727    /// a single frame at a time. In this second case, every buffer that
728    /// arrives to the element is considered part of the same frame until
729    /// [`finish_frame()`][Self::finish_frame()] is called.
730    ///
731    /// In either case, the same [`VideoCodecFrame`][crate::VideoCodecFrame] will be passed to the
732    /// GstVideoDecoderClass:handle_frame vmethod repeatedly with a
733    /// different GstVideoCodecFrame:input_buffer every time until the end of the
734    /// frame has been signaled using either method.
735    /// This method must be called during the decoder subclass `set_format` call.
736    /// ## `subframe_mode`
737    /// whether the input data should be considered as subframes.
738    #[cfg(feature = "v1_20")]
739    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
740    #[doc(alias = "gst_video_decoder_set_subframe_mode")]
741    fn set_subframe_mode(&self, subframe_mode: bool) {
742        unsafe {
743            ffi::gst_video_decoder_set_subframe_mode(
744                self.as_ref().to_glib_none().0,
745                subframe_mode.into_glib(),
746            );
747        }
748    }
749
750    /// Lets [`VideoDecoder`][crate::VideoDecoder] sub-classes decide if they want the sink pad
751    /// to use the default pad query handler to reply to accept-caps queries.
752    ///
753    /// By setting this to true it is possible to further customize the default
754    /// handler with `GST_PAD_SET_ACCEPT_INTERSECT` and
755    /// `GST_PAD_SET_ACCEPT_TEMPLATE`
756    /// ## `use_`
757    /// if the default pad accept-caps query handling should be used
758    #[doc(alias = "gst_video_decoder_set_use_default_pad_acceptcaps")]
759    fn set_use_default_pad_acceptcaps(&self, use_: bool) {
760        unsafe {
761            ffi::gst_video_decoder_set_use_default_pad_acceptcaps(
762                self.as_ref().to_glib_none().0,
763                use_.into_glib(),
764            );
765        }
766    }
767
768    /// GstVideoDecoderRequestSyncPointFlags to use for the automatically
769    /// requested sync points if `automatic-request-sync-points` is enabled.
770    #[cfg(feature = "v1_20")]
771    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
772    #[doc(alias = "automatic-request-sync-point-flags")]
773    fn automatic_request_sync_point_flags(&self) -> VideoDecoderRequestSyncPointFlags {
774        ObjectExt::property(self.as_ref(), "automatic-request-sync-point-flags")
775    }
776
777    /// GstVideoDecoderRequestSyncPointFlags to use for the automatically
778    /// requested sync points if `automatic-request-sync-points` is enabled.
779    #[cfg(feature = "v1_20")]
780    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
781    #[doc(alias = "automatic-request-sync-point-flags")]
782    fn set_automatic_request_sync_point_flags(
783        &self,
784        automatic_request_sync_point_flags: VideoDecoderRequestSyncPointFlags,
785    ) {
786        ObjectExt::set_property(
787            self.as_ref(),
788            "automatic-request-sync-point-flags",
789            automatic_request_sync_point_flags,
790        )
791    }
792
793    /// If set to [`true`] the decoder will automatically request sync points when
794    /// it seems like a good idea, e.g. if the first frames are not key frames or
795    /// if packet loss was reported by upstream.
796    #[cfg(feature = "v1_20")]
797    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
798    #[doc(alias = "automatic-request-sync-points")]
799    fn is_automatic_request_sync_points(&self) -> bool {
800        ObjectExt::property(self.as_ref(), "automatic-request-sync-points")
801    }
802
803    /// If set to [`true`] the decoder will automatically request sync points when
804    /// it seems like a good idea, e.g. if the first frames are not key frames or
805    /// if packet loss was reported by upstream.
806    #[cfg(feature = "v1_20")]
807    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
808    #[doc(alias = "automatic-request-sync-points")]
809    fn set_automatic_request_sync_points(&self, automatic_request_sync_points: bool) {
810        ObjectExt::set_property(
811            self.as_ref(),
812            "automatic-request-sync-points",
813            automatic_request_sync_points,
814        )
815    }
816
817    /// If set to [`true`] the decoder will discard frames that are marked as
818    /// corrupted instead of outputting them.
819    #[cfg(feature = "v1_20")]
820    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
821    #[doc(alias = "discard-corrupted-frames")]
822    fn is_discard_corrupted_frames(&self) -> bool {
823        ObjectExt::property(self.as_ref(), "discard-corrupted-frames")
824    }
825
826    /// If set to [`true`] the decoder will discard frames that are marked as
827    /// corrupted instead of outputting them.
828    #[cfg(feature = "v1_20")]
829    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
830    #[doc(alias = "discard-corrupted-frames")]
831    fn set_discard_corrupted_frames(&self, discard_corrupted_frames: bool) {
832        ObjectExt::set_property(
833            self.as_ref(),
834            "discard-corrupted-frames",
835            discard_corrupted_frames,
836        )
837    }
838
839    /// Minimum interval between force-key-unit events sent upstream by the
840    /// decoder. Setting this to 0 will cause every event to be handled, setting
841    /// this to `GST_CLOCK_TIME_NONE` will cause every event to be ignored.
842    ///
843    /// See `gst_video_event_new_upstream_force_key_unit()` for more details about
844    /// force-key-unit events.
845    #[cfg(feature = "v1_20")]
846    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
847    #[doc(alias = "min-force-key-unit-interval")]
848    fn min_force_key_unit_interval(&self) -> u64 {
849        ObjectExt::property(self.as_ref(), "min-force-key-unit-interval")
850    }
851
852    /// Minimum interval between force-key-unit events sent upstream by the
853    /// decoder. Setting this to 0 will cause every event to be handled, setting
854    /// this to `GST_CLOCK_TIME_NONE` will cause every event to be ignored.
855    ///
856    /// See `gst_video_event_new_upstream_force_key_unit()` for more details about
857    /// force-key-unit events.
858    #[cfg(feature = "v1_20")]
859    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
860    #[doc(alias = "min-force-key-unit-interval")]
861    fn set_min_force_key_unit_interval(&self, min_force_key_unit_interval: u64) {
862        ObjectExt::set_property(
863            self.as_ref(),
864            "min-force-key-unit-interval",
865            min_force_key_unit_interval,
866        )
867    }
868
869    /// If set to [`true`] the decoder will handle QoS events received
870    /// from downstream elements.
871    /// This includes dropping output frames which are detected as late
872    /// using the metrics reported by those events.
873    #[cfg(feature = "v1_18")]
874    #[cfg_attr(docsrs, doc(cfg(feature = "v1_18")))]
875    fn is_qos(&self) -> bool {
876        ObjectExt::property(self.as_ref(), "qos")
877    }
878
879    /// If set to [`true`] the decoder will handle QoS events received
880    /// from downstream elements.
881    /// This includes dropping output frames which are detected as late
882    /// using the metrics reported by those events.
883    #[cfg(feature = "v1_18")]
884    #[cfg_attr(docsrs, doc(cfg(feature = "v1_18")))]
885    fn set_qos(&self, qos: bool) {
886        ObjectExt::set_property(self.as_ref(), "qos", qos)
887    }
888
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "automatic-request-sync-point-flags")]
    /// Connects `f` to the GObject `notify` signal for the
    /// `automatic-request-sync-point-flags` property, i.e. it runs whenever
    /// that property changes. Returns a handle for disconnecting the handler.
    fn connect_automatic_request_sync_point_flags_notify<F: Fn(&Self) + Send + Sync + 'static>(
        &self,
        f: F,
    ) -> SignalHandlerId {
        // C-ABI trampoline invoked by GLib: recovers the boxed Rust closure
        // from the user-data pointer and calls it with the emitting object.
        unsafe extern "C" fn notify_automatic_request_sync_point_flags_trampoline<
            P: IsA<VideoDecoder>,
            F: Fn(&P) + Send + Sync + 'static,
        >(
            this: *mut ffi::GstVideoDecoder,
            _param_spec: glib::ffi::gpointer,
            f: glib::ffi::gpointer,
        ) {
            // SAFETY: `f` is the Box::into_raw pointer registered below and
            // points to a value of this same concrete type `F`.
            let f: &F = &*(f as *const F);
            f(VideoDecoder::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            // Ownership of the boxed closure is handed to GLib as callback
            // user data; the transmute adapts the typed trampoline to the
            // untyped `unsafe extern "C" fn()` callback slot of connect_raw.
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                c"notify::automatic-request-sync-point-flags".as_ptr() as *const _,
                Some(std::mem::transmute::<*const (), unsafe extern "C" fn()>(
                    notify_automatic_request_sync_point_flags_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
919
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "automatic-request-sync-points")]
    /// Connects `f` to the GObject `notify` signal for the
    /// `automatic-request-sync-points` property, i.e. it runs whenever that
    /// property changes. Returns a handle for disconnecting the handler.
    fn connect_automatic_request_sync_points_notify<F: Fn(&Self) + Send + Sync + 'static>(
        &self,
        f: F,
    ) -> SignalHandlerId {
        // C-ABI trampoline invoked by GLib: recovers the boxed Rust closure
        // from the user-data pointer and calls it with the emitting object.
        unsafe extern "C" fn notify_automatic_request_sync_points_trampoline<
            P: IsA<VideoDecoder>,
            F: Fn(&P) + Send + Sync + 'static,
        >(
            this: *mut ffi::GstVideoDecoder,
            _param_spec: glib::ffi::gpointer,
            f: glib::ffi::gpointer,
        ) {
            // SAFETY: `f` is the Box::into_raw pointer registered below and
            // points to a value of this same concrete type `F`.
            let f: &F = &*(f as *const F);
            f(VideoDecoder::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            // Ownership of the boxed closure is handed to GLib as callback
            // user data; the transmute adapts the typed trampoline to the
            // untyped `unsafe extern "C" fn()` callback slot of connect_raw.
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                c"notify::automatic-request-sync-points".as_ptr() as *const _,
                Some(std::mem::transmute::<*const (), unsafe extern "C" fn()>(
                    notify_automatic_request_sync_points_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
950
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "discard-corrupted-frames")]
    /// Connects `f` to the GObject `notify` signal for the
    /// `discard-corrupted-frames` property, i.e. it runs whenever that
    /// property changes. Returns a handle for disconnecting the handler.
    fn connect_discard_corrupted_frames_notify<F: Fn(&Self) + Send + Sync + 'static>(
        &self,
        f: F,
    ) -> SignalHandlerId {
        // C-ABI trampoline invoked by GLib: recovers the boxed Rust closure
        // from the user-data pointer and calls it with the emitting object.
        unsafe extern "C" fn notify_discard_corrupted_frames_trampoline<
            P: IsA<VideoDecoder>,
            F: Fn(&P) + Send + Sync + 'static,
        >(
            this: *mut ffi::GstVideoDecoder,
            _param_spec: glib::ffi::gpointer,
            f: glib::ffi::gpointer,
        ) {
            // SAFETY: `f` is the Box::into_raw pointer registered below and
            // points to a value of this same concrete type `F`.
            let f: &F = &*(f as *const F);
            f(VideoDecoder::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            // Ownership of the boxed closure is handed to GLib as callback
            // user data; the transmute adapts the typed trampoline to the
            // untyped `unsafe extern "C" fn()` callback slot of connect_raw.
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                c"notify::discard-corrupted-frames".as_ptr() as *const _,
                Some(std::mem::transmute::<*const (), unsafe extern "C" fn()>(
                    notify_discard_corrupted_frames_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
981
    #[cfg(feature = "v1_18")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_18")))]
    #[doc(alias = "max-errors")]
    /// Connects `f` to the GObject `notify` signal for the `max-errors`
    /// property, i.e. it runs whenever that property changes. Returns a
    /// handle for disconnecting the handler.
    fn connect_max_errors_notify<F: Fn(&Self) + Send + Sync + 'static>(
        &self,
        f: F,
    ) -> SignalHandlerId {
        // C-ABI trampoline invoked by GLib: recovers the boxed Rust closure
        // from the user-data pointer and calls it with the emitting object.
        unsafe extern "C" fn notify_max_errors_trampoline<
            P: IsA<VideoDecoder>,
            F: Fn(&P) + Send + Sync + 'static,
        >(
            this: *mut ffi::GstVideoDecoder,
            _param_spec: glib::ffi::gpointer,
            f: glib::ffi::gpointer,
        ) {
            // SAFETY: `f` is the Box::into_raw pointer registered below and
            // points to a value of this same concrete type `F`.
            let f: &F = &*(f as *const F);
            f(VideoDecoder::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            // Ownership of the boxed closure is handed to GLib as callback
            // user data; the transmute adapts the typed trampoline to the
            // untyped `unsafe extern "C" fn()` callback slot of connect_raw.
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                c"notify::max-errors".as_ptr() as *const _,
                Some(std::mem::transmute::<*const (), unsafe extern "C" fn()>(
                    notify_max_errors_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
1012
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "min-force-key-unit-interval")]
    /// Connects `f` to the GObject `notify` signal for the
    /// `min-force-key-unit-interval` property, i.e. it runs whenever that
    /// property changes. Returns a handle for disconnecting the handler.
    fn connect_min_force_key_unit_interval_notify<F: Fn(&Self) + Send + Sync + 'static>(
        &self,
        f: F,
    ) -> SignalHandlerId {
        // C-ABI trampoline invoked by GLib: recovers the boxed Rust closure
        // from the user-data pointer and calls it with the emitting object.
        unsafe extern "C" fn notify_min_force_key_unit_interval_trampoline<
            P: IsA<VideoDecoder>,
            F: Fn(&P) + Send + Sync + 'static,
        >(
            this: *mut ffi::GstVideoDecoder,
            _param_spec: glib::ffi::gpointer,
            f: glib::ffi::gpointer,
        ) {
            // SAFETY: `f` is the Box::into_raw pointer registered below and
            // points to a value of this same concrete type `F`.
            let f: &F = &*(f as *const F);
            f(VideoDecoder::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            // Ownership of the boxed closure is handed to GLib as callback
            // user data; the transmute adapts the typed trampoline to the
            // untyped `unsafe extern "C" fn()` callback slot of connect_raw.
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                c"notify::min-force-key-unit-interval".as_ptr() as *const _,
                Some(std::mem::transmute::<*const (), unsafe extern "C" fn()>(
                    notify_min_force_key_unit_interval_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
1043
    #[cfg(feature = "v1_18")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_18")))]
    #[doc(alias = "qos")]
    /// Connects `f` to the GObject `notify` signal for the `qos` property,
    /// i.e. it runs whenever that property changes. Returns a handle for
    /// disconnecting the handler.
    fn connect_qos_notify<F: Fn(&Self) + Send + Sync + 'static>(&self, f: F) -> SignalHandlerId {
        // C-ABI trampoline invoked by GLib: recovers the boxed Rust closure
        // from the user-data pointer and calls it with the emitting object.
        unsafe extern "C" fn notify_qos_trampoline<
            P: IsA<VideoDecoder>,
            F: Fn(&P) + Send + Sync + 'static,
        >(
            this: *mut ffi::GstVideoDecoder,
            _param_spec: glib::ffi::gpointer,
            f: glib::ffi::gpointer,
        ) {
            // SAFETY: `f` is the Box::into_raw pointer registered below and
            // points to a value of this same concrete type `F`.
            let f: &F = &*(f as *const F);
            f(VideoDecoder::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            // Ownership of the boxed closure is handed to GLib as callback
            // user data; the transmute adapts the typed trampoline to the
            // untyped `unsafe extern "C" fn()` callback slot of connect_raw.
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                c"notify::qos".as_ptr() as *const _,
                Some(std::mem::transmute::<*const (), unsafe extern "C" fn()>(
                    notify_qos_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
1071}
1072
// Blanket implementation: any type that can be used as a `VideoDecoder`
// (via `IsA<VideoDecoder>`) automatically gains the `VideoDecoderExt`
// default method bodies; no per-type code is needed.
impl<O: IsA<VideoDecoder>> VideoDecoderExt for O {}