// gstreamer_video/auto/video_decoder.rs
1// This file was generated by gir (https://github.com/gtk-rs/gir)
2// from gir-files (https://github.com/gtk-rs/gir-files)
3// from gst-gir-files (https://gitlab.freedesktop.org/gstreamer/gir-files-rs.git)
4// DO NOT EDIT
5
6#[cfg(feature = "v1_20")]
7#[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
8use crate::VideoDecoderRequestSyncPointFlags;
9use crate::{ffi, VideoCodecFrame};
10#[cfg(feature = "v1_18")]
11#[cfg_attr(docsrs, doc(cfg(feature = "v1_18")))]
12use glib::signal::{connect_raw, SignalHandlerId};
13use glib::{prelude::*, translate::*};
14#[cfg(feature = "v1_18")]
15#[cfg_attr(docsrs, doc(cfg(feature = "v1_18")))]
16use std::boxed::Box as Box_;
17
glib::wrapper! {
    /// This base class is for video decoders turning encoded data into raw video
    /// frames.
    ///
    /// The GstVideoDecoder base class and derived subclasses should cooperate as
    /// follows:
    ///
    /// ## Configuration
    ///
    /// * Initially, GstVideoDecoder calls `start` when the decoder element
    /// is activated, which allows the subclass to perform any global setup.
    ///
    /// * GstVideoDecoder calls `set_format` to inform the subclass of caps
    /// describing input video data that it is about to receive, including
    /// possibly configuration data.
    /// While unlikely, it might be called more than once, if changing input
    /// parameters require reconfiguration.
    ///
    /// * Incoming data buffers are processed as needed, described in Data
    /// Processing below.
    ///
    /// * GstVideoDecoder calls `stop` at end of all processing.
    ///
    /// ## Data processing
    ///
    /// * The base class gathers input data, and optionally allows subclass
    /// to parse this into subsequently manageable chunks, typically
    /// corresponding to and referred to as 'frames'.
    ///
    /// * Each input frame is provided in turn to the subclass' `handle_frame`
    /// callback.
    /// * When the subclass enables the subframe mode with `gst_video_decoder_set_subframe_mode`,
    /// the base class will provide to the subclass the same input frame with
    /// different input buffers to the subclass `handle_frame`
    /// callback. During this call, the subclass needs to take
    /// ownership of the input_buffer as [`VideoCodecFrame`][crate::VideoCodecFrame]
    /// will have been changed before the next subframe buffer is received.
    /// The subclass will call `gst_video_decoder_have_last_subframe`
    /// when a new input frame can be created by the base class.
    /// Every subframe will share the same [`VideoCodecFrame`][crate::VideoCodecFrame]
    /// to write the decoding result. The subclass is responsible to protect
    /// its access.
    ///
    /// * If codec processing results in decoded data, the subclass should call
    /// [`VideoDecoderExt::finish_frame()`][crate::prelude::VideoDecoderExt::finish_frame()] to have decoded data pushed
    /// downstream. In subframe mode
    /// the subclass should call [`VideoDecoderExt::finish_subframe()`][crate::prelude::VideoDecoderExt::finish_subframe()] until the
    /// last subframe where it should call [`VideoDecoderExt::finish_frame()`][crate::prelude::VideoDecoderExt::finish_frame()].
    /// The subclass can detect the last subframe using GST_VIDEO_BUFFER_FLAG_MARKER
    /// on buffers or using its own logic to collect the subframes.
    /// In case of decoding failure, the subclass must call
    /// [`VideoDecoderExt::drop_frame()`][crate::prelude::VideoDecoderExt::drop_frame()] or [`VideoDecoderExt::drop_subframe()`][crate::prelude::VideoDecoderExt::drop_subframe()],
    /// to allow the base class to do timestamp and offset tracking, and possibly
    /// to requeue the frame for a later attempt in the case of reverse playback.
    ///
    /// ## Shutdown phase
    ///
    /// * The GstVideoDecoder class calls `stop` to inform the subclass that data
    /// parsing will be stopped.
    ///
    /// ## Additional Notes
    ///
    /// * Seeking/Flushing
    ///
    /// * When the pipeline is seeked or otherwise flushed, the subclass is
    /// informed via a call to its `reset` callback, with the hard parameter
    /// set to true. This indicates the subclass should drop any internal data
    /// queues and timestamps and prepare for a fresh set of buffers to arrive
    /// for parsing and decoding.
    ///
    /// * End Of Stream
    ///
    /// * At end-of-stream, the subclass `parse` function may be called some final
    /// times with the at_eos parameter set to true, indicating that the element
    /// should not expect any more data to be arriving, and it should parse any
    /// remaining frames and call [`VideoDecoderExt::have_frame()`][crate::prelude::VideoDecoderExt::have_frame()] if possible.
    ///
    /// The subclass is responsible for providing pad template caps for
    /// source and sink pads. The pads need to be named "sink" and "src". It also
    /// needs to provide information about the output caps, when they are known.
    /// This may be when the base class calls the subclass' `set_format` function,
    /// though it might be during decoding, before calling
    /// [`VideoDecoderExt::finish_frame()`][crate::prelude::VideoDecoderExt::finish_frame()]. This is done via
    /// [`VideoDecoderExtManual::set_output_state()`][crate::prelude::VideoDecoderExtManual::set_output_state()]
    ///
    /// The subclass is also responsible for providing (presentation) timestamps
    /// (likely based on corresponding input ones). If that is not applicable
    /// or possible, the base class provides limited framerate based interpolation.
    ///
    /// Similarly, the base class provides some limited (legacy) seeking support
    /// if specifically requested by the subclass, as full-fledged support
    /// should rather be left to upstream demuxer, parser or alike. This simple
    /// approach caters for seeking and duration reporting using estimated input
    /// bitrates. To enable it, a subclass should call
    /// [`VideoDecoderExt::set_estimate_rate()`][crate::prelude::VideoDecoderExt::set_estimate_rate()] to enable handling of incoming
    /// byte-streams.
    ///
    /// The base class provides some support for reverse playback, in particular
    /// in case incoming data is not packetized or upstream does not provide
    /// fragments on keyframe boundaries. However, the subclass should then be
    /// prepared for the parsing and frame processing stage to occur separately
    /// (in normal forward processing, the latter immediately follows the former).
    /// The subclass also needs to ensure the parsing stage properly marks
    /// keyframes, unless it knows the upstream elements will do so properly for
    /// incoming data.
    ///
    /// The bare minimum that a functional subclass needs to implement is:
    ///
    /// * Provide pad templates
    /// * Inform the base class of output caps via
    /// [`VideoDecoderExtManual::set_output_state()`][crate::prelude::VideoDecoderExtManual::set_output_state()]
    ///
    /// * Parse input data, if it is not considered packetized from upstream
    /// Data will be provided to `parse` which should invoke
    /// [`VideoDecoderExt::add_to_frame()`][crate::prelude::VideoDecoderExt::add_to_frame()] and [`VideoDecoderExt::have_frame()`][crate::prelude::VideoDecoderExt::have_frame()] to
    /// separate the data belonging to each video frame.
    ///
    /// * Accept data in `handle_frame` and provide decoded results to
    /// [`VideoDecoderExt::finish_frame()`][crate::prelude::VideoDecoderExt::finish_frame()], or call [`VideoDecoderExt::drop_frame()`][crate::prelude::VideoDecoderExt::drop_frame()].
    ///
    /// This is an Abstract Base Class, you cannot instantiate it.
    ///
    /// ## Properties
    ///
    ///
    /// #### `automatic-request-sync-point-flags`
    /// GstVideoDecoderRequestSyncPointFlags to use for the automatically
    /// requested sync points if `automatic-request-sync-points` is enabled.
    ///
    /// Readable | Writeable
    ///
    ///
    /// #### `automatic-request-sync-points`
    /// If set to [`true`] the decoder will automatically request sync points when
    /// it seems like a good idea, e.g. if the first frames are not key frames or
    /// if packet loss was reported by upstream.
    ///
    /// Readable | Writeable
    ///
    ///
    /// #### `discard-corrupted-frames`
    /// If set to [`true`] the decoder will discard frames that are marked as
    /// corrupted instead of outputting them.
    ///
    /// Readable | Writeable
    ///
    ///
    /// #### `max-errors`
    /// Maximum number of tolerated consecutive decode errors. See
    /// [`VideoDecoderExt::set_max_errors()`][crate::prelude::VideoDecoderExt::set_max_errors()] for more details.
    ///
    /// Readable | Writeable
    ///
    ///
    /// #### `min-force-key-unit-interval`
    /// Minimum interval between force-key-unit events sent upstream by the
    /// decoder. Setting this to 0 will cause every event to be handled, setting
    /// this to `GST_CLOCK_TIME_NONE` will cause every event to be ignored.
    ///
    /// See `gst_video_event_new_upstream_force_key_unit()` for more details about
    /// force-key-unit events.
    ///
    /// Readable | Writeable
    ///
    ///
    /// #### `qos`
    /// If set to [`true`] the decoder will handle QoS events received
    /// from downstream elements.
    /// This includes dropping output frames which are detected as late
    /// using the metrics reported by those events.
    ///
    /// Readable | Writeable
    /// <details><summary><h4>Object</h4></summary>
    ///
    ///
    /// #### `name`
    /// Readable | Writeable | Construct
    ///
    ///
    /// #### `parent`
    /// The parent of the object. Please note, that when changing the 'parent'
    /// property, we don't emit [`notify`][struct@crate::glib::Object#notify] and [`deep-notify`][struct@crate::gst::Object#deep-notify]
    /// signals due to locking issues. In some cases one can use
    /// `GstBin::element-added` or `GstBin::element-removed` signals on the parent to
    /// achieve a similar effect.
    ///
    /// Readable | Writeable
    /// </details>
    ///
    /// # Implements
    ///
    /// [`VideoDecoderExt`][trait@crate::prelude::VideoDecoderExt], [`trait@gst::prelude::ElementExt`], [`trait@gst::prelude::ObjectExt`], [`trait@glib::ObjectExt`], [`VideoDecoderExtManual`][trait@crate::prelude::VideoDecoderExtManual]
    #[doc(alias = "GstVideoDecoder")]
    pub struct VideoDecoder(Object<ffi::GstVideoDecoder, ffi::GstVideoDecoderClass>) @extends gst::Element, gst::Object;

    match fn {
        type_ => || ffi::gst_video_decoder_get_type(),
    }
}
217
impl VideoDecoder {
    // Convenience constant for passing `None` to APIs that take
    // `Option<&impl IsA<VideoDecoder>>` without having to annotate the type.
    pub const NONE: Option<&'static VideoDecoder> = None;
}
221
// SAFETY: emitted by the gir generator from the upstream GIR annotations, which
// mark GstVideoDecoder as usable from multiple threads (GObject refcounting is
// atomic). NOTE(review): soundness rests on those annotations being correct.
unsafe impl Send for VideoDecoder {}
unsafe impl Sync for VideoDecoder {}
224
// Sealed-trait pattern: `VideoDecoderExt` requires `sealed::Sealed`, which is
// only implemented here (for every `IsA<VideoDecoder>`), so downstream crates
// cannot implement the extension trait themselves.
mod sealed {
    pub trait Sealed {}
    impl<T: super::IsA<super::VideoDecoder>> Sealed for T {}
}
229
230/// Trait containing all [`struct@VideoDecoder`] methods.
231///
232/// # Implementors
233///
234/// [`VideoDecoder`][struct@crate::VideoDecoder]
235pub trait VideoDecoderExt: IsA<VideoDecoder> + sealed::Sealed + 'static {
    /// Removes next `n_bytes` of input data and adds it to currently parsed frame.
    /// ## `n_bytes`
    /// the number of bytes to add
    #[doc(alias = "gst_video_decoder_add_to_frame")]
    fn add_to_frame(&self, n_bytes: i32) {
        unsafe {
            // SAFETY: `self.as_ref()` yields a valid GstVideoDecoder pointer for
            // the duration of the call; the C function borrows it.
            ffi::gst_video_decoder_add_to_frame(self.as_ref().to_glib_none().0, n_bytes);
        }
    }
245
    /// Helper function that allocates a buffer to hold a video frame for `self`'s
    /// current [`VideoCodecState`][crate::VideoCodecState].
    ///
    /// You should use [`VideoDecoderExtManual::allocate_output_frame()`][crate::prelude::VideoDecoderExtManual::allocate_output_frame()] instead of this
    /// function, if possible at all.
    ///
    /// # Returns
    ///
    /// allocated buffer, or NULL if no buffer could be
    /// allocated (e.g. when downstream is flushing or shutting down)
    #[doc(alias = "gst_video_decoder_allocate_output_buffer")]
    fn allocate_output_buffer(&self) -> Result<gst::Buffer, glib::BoolError> {
        unsafe {
            // `from_glib_full` takes ownership of the returned buffer ref; a NULL
            // return from C is mapped onto a `BoolError` instead of panicking.
            Option::<_>::from_glib_full(ffi::gst_video_decoder_allocate_output_buffer(
                self.as_ref().to_glib_none().0,
            ))
            .ok_or_else(|| glib::bool_error!("Failed to allocate output buffer"))
        }
    }
265
    /// Similar to [`finish_frame()`][Self::finish_frame()], but drops `frame` in any
    /// case and posts a QoS message with the frame's details on the bus.
    /// In any case, the frame is considered finished and released.
    /// ## `frame`
    /// the [`VideoCodecFrame`][crate::VideoCodecFrame] to drop
    ///
    /// # Returns
    ///
    /// a [`gst::FlowReturn`][crate::gst::FlowReturn], usually GST_FLOW_OK.
    #[doc(alias = "gst_video_decoder_drop_frame")]
    fn drop_frame(&self, frame: VideoCodecFrame) -> Result<gst::FlowSuccess, gst::FlowError> {
        unsafe {
            // `into_glib_ptr()` transfers ownership of `frame` to the C function,
            // matching the API taking the frame by value.
            try_from_glib(ffi::gst_video_decoder_drop_frame(
                self.as_ref().to_glib_none().0,
                frame.into_glib_ptr(),
            ))
        }
    }
284
    /// Drops input data.
    /// The frame is not considered finished until the whole frame
    /// is finished or dropped by the subclass.
    /// ## `frame`
    /// the [`VideoCodecFrame`][crate::VideoCodecFrame]
    ///
    /// # Returns
    ///
    /// a [`gst::FlowReturn`][crate::gst::FlowReturn], usually GST_FLOW_OK.
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "gst_video_decoder_drop_subframe")]
    fn drop_subframe(&self, frame: VideoCodecFrame) -> Result<gst::FlowSuccess, gst::FlowError> {
        unsafe {
            // Ownership of `frame` is handed to the C function via `into_glib_ptr()`.
            try_from_glib(ffi::gst_video_decoder_drop_subframe(
                self.as_ref().to_glib_none().0,
                frame.into_glib_ptr(),
            ))
        }
    }
305
    /// `frame` should have a valid decoded data buffer, whose metadata fields
    /// are then appropriately set according to frame data and pushed downstream.
    /// If no output data is provided, `frame` is considered skipped.
    /// In any case, the frame is considered finished and released.
    ///
    /// After calling this function the output buffer of the frame is to be
    /// considered read-only. This function will also change the metadata
    /// of the buffer.
    /// ## `frame`
    /// a decoded [`VideoCodecFrame`][crate::VideoCodecFrame]
    ///
    /// # Returns
    ///
    /// a [`gst::FlowReturn`][crate::gst::FlowReturn] resulting from sending data downstream
    #[doc(alias = "gst_video_decoder_finish_frame")]
    fn finish_frame(&self, frame: VideoCodecFrame) -> Result<gst::FlowSuccess, gst::FlowError> {
        unsafe {
            // `frame` is consumed: its ref is transferred to the C function.
            try_from_glib(ffi::gst_video_decoder_finish_frame(
                self.as_ref().to_glib_none().0,
                frame.into_glib_ptr(),
            ))
        }
    }
329
    /// Indicate that a subframe has been finished to be decoded
    /// by the subclass. This method should be called for all subframes
    /// except the last subframe where [`finish_frame()`][Self::finish_frame()]
    /// should be called instead.
    /// ## `frame`
    /// the [`VideoCodecFrame`][crate::VideoCodecFrame]
    ///
    /// # Returns
    ///
    /// a [`gst::FlowReturn`][crate::gst::FlowReturn], usually GST_FLOW_OK.
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "gst_video_decoder_finish_subframe")]
    fn finish_subframe(&self, frame: VideoCodecFrame) -> Result<gst::FlowSuccess, gst::FlowError> {
        unsafe {
            // `frame` is consumed: its ref is transferred to the C function.
            try_from_glib(ffi::gst_video_decoder_finish_subframe(
                self.as_ref().to_glib_none().0,
                frame.into_glib_ptr(),
            ))
        }
    }
351
    /// Returns the buffer pool currently in use by the decoder, if any.
    ///
    /// # Returns
    ///
    /// the instance of the [`gst::BufferPool`][crate::gst::BufferPool] used
    /// by the decoder; free it after use
    #[doc(alias = "gst_video_decoder_get_buffer_pool")]
    #[doc(alias = "get_buffer_pool")]
    fn buffer_pool(&self) -> Option<gst::BufferPool> {
        unsafe {
            // `from_glib_full` assumes ownership of the returned pool ref; a NULL
            // return becomes `None`.
            from_glib_full(ffi::gst_video_decoder_get_buffer_pool(
                self.as_ref().to_glib_none().0,
            ))
        }
    }
366
    ///
    /// # Returns
    ///
    /// currently configured byte to time conversion setting
    #[doc(alias = "gst_video_decoder_get_estimate_rate")]
    #[doc(alias = "get_estimate_rate")]
    fn estimate_rate(&self) -> i32 {
        // NOTE(review): despite the i32 return, this mirrors a flag configured by
        // `set_estimate_rate(bool)` below — likely 0/1; confirm against C docs.
        unsafe { ffi::gst_video_decoder_get_estimate_rate(self.as_ref().to_glib_none().0) }
    }
376
    /// Determines maximum possible decoding time for `frame` that will
    /// allow it to decode and arrive in time (as determined by QoS events).
    /// In particular, a negative result means decoding in time is no longer possible
    /// and should therefore occur as soon/skippy as possible.
    /// ## `frame`
    /// a [`VideoCodecFrame`][crate::VideoCodecFrame]
    ///
    /// # Returns
    ///
    /// max decoding time.
    #[doc(alias = "gst_video_decoder_get_max_decode_time")]
    #[doc(alias = "get_max_decode_time")]
    fn max_decode_time(&self, frame: &VideoCodecFrame) -> gst::ClockTimeDiff {
        unsafe {
            // `frame` is only borrowed here (`to_glib_none`), unlike the
            // consuming finish/drop calls.
            ffi::gst_video_decoder_get_max_decode_time(
                self.as_ref().to_glib_none().0,
                frame.to_glib_none().0,
            )
        }
    }
397
    /// Gets the current value of the `max-errors` property.
    ///
    /// # Returns
    ///
    /// currently configured decoder tolerated error count.
    #[doc(alias = "gst_video_decoder_get_max_errors")]
    #[doc(alias = "get_max_errors")]
    #[doc(alias = "max-errors")]
    fn max_errors(&self) -> i32 {
        unsafe { ffi::gst_video_decoder_get_max_errors(self.as_ref().to_glib_none().0) }
    }
408
    /// Queries decoder required format handling.
    ///
    /// # Returns
    ///
    /// [`true`] if required format handling is enabled.
    #[doc(alias = "gst_video_decoder_get_needs_format")]
    #[doc(alias = "get_needs_format")]
    fn needs_format(&self) -> bool {
        unsafe {
            // gboolean -> bool conversion via `from_glib`.
            from_glib(ffi::gst_video_decoder_get_needs_format(
                self.as_ref().to_glib_none().0,
            ))
        }
    }
423
    /// Queries if the decoder requires a sync point before it starts outputting
    /// data in the beginning.
    ///
    /// # Returns
    ///
    /// [`true`] if a sync point is required in the beginning.
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "gst_video_decoder_get_needs_sync_point")]
    #[doc(alias = "get_needs_sync_point")]
    fn needs_sync_point(&self) -> bool {
        unsafe {
            // gboolean -> bool conversion via `from_glib`.
            from_glib(ffi::gst_video_decoder_get_needs_sync_point(
                self.as_ref().to_glib_none().0,
            ))
        }
    }
441
    /// Queries whether input data is considered packetized or not by the
    /// base class.
    ///
    /// # Returns
    ///
    /// TRUE if input data is considered packetized.
    #[doc(alias = "gst_video_decoder_get_packetized")]
    #[doc(alias = "get_packetized")]
    fn is_packetized(&self) -> bool {
        unsafe {
            // gboolean -> bool conversion via `from_glib`.
            from_glib(ffi::gst_video_decoder_get_packetized(
                self.as_ref().to_glib_none().0,
            ))
        }
    }
457
    /// Returns the number of bytes previously added to the current frame
    /// by calling [`add_to_frame()`][Self::add_to_frame()].
    ///
    /// # Returns
    ///
    /// The number of bytes pending for the current frame
    #[doc(alias = "gst_video_decoder_get_pending_frame_size")]
    #[doc(alias = "get_pending_frame_size")]
    fn pending_frame_size(&self) -> usize {
        unsafe { ffi::gst_video_decoder_get_pending_frame_size(self.as_ref().to_glib_none().0) }
    }
469
    /// Gets the current QoS proportion reported by downstream.
    ///
    /// # Returns
    ///
    /// The current QoS proportion.
    #[doc(alias = "gst_video_decoder_get_qos_proportion")]
    #[doc(alias = "get_qos_proportion")]
    fn qos_proportion(&self) -> f64 {
        unsafe { ffi::gst_video_decoder_get_qos_proportion(self.as_ref().to_glib_none().0) }
    }
479
    /// Queries whether input data is considered as subframes or not by the
    /// base class. If FALSE, each input buffer will be considered as a full
    /// frame.
    ///
    /// # Returns
    ///
    /// TRUE if input data is considered as sub frames.
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "gst_video_decoder_get_subframe_mode")]
    #[doc(alias = "get_subframe_mode")]
    fn is_subframe_mode(&self) -> bool {
        unsafe {
            // gboolean -> bool conversion via `from_glib`.
            from_glib(ffi::gst_video_decoder_get_subframe_mode(
                self.as_ref().to_glib_none().0,
            ))
        }
    }
498
    /// Gathers all data collected for currently parsed frame, gathers corresponding
    /// metadata and passes it along for further processing, i.e. `handle_frame`.
    ///
    /// # Returns
    ///
    /// a [`gst::FlowReturn`][crate::gst::FlowReturn]
    #[doc(alias = "gst_video_decoder_have_frame")]
    fn have_frame(&self) -> Result<gst::FlowSuccess, gst::FlowError> {
        unsafe {
            // Non-OK flow returns are surfaced as `Err(gst::FlowError)`.
            try_from_glib(ffi::gst_video_decoder_have_frame(
                self.as_ref().to_glib_none().0,
            ))
        }
    }
513
    /// Indicates that the last subframe has been processed by the decoder
    /// in `frame`. This will release the current frame in video decoder
    /// allowing to receive new frames from upstream elements. This method
    /// must be called in the subclass `handle_frame` callback.
    /// ## `frame`
    /// the [`VideoCodecFrame`][crate::VideoCodecFrame] to update
    ///
    /// # Returns
    ///
    /// a [`gst::FlowReturn`][crate::gst::FlowReturn], usually GST_FLOW_OK.
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "gst_video_decoder_have_last_subframe")]
    fn have_last_subframe(
        &self,
        frame: &VideoCodecFrame,
    ) -> Result<gst::FlowSuccess, gst::FlowError> {
        unsafe {
            // `frame` is borrowed here, not consumed.
            try_from_glib(ffi::gst_video_decoder_have_last_subframe(
                self.as_ref().to_glib_none().0,
                frame.to_glib_none().0,
            ))
        }
    }
538
    /// Sets the video decoder tags and how they should be merged with any
    /// upstream stream tags. This will override any tags previously-set
    /// with `gst_video_decoder_merge_tags()`.
    ///
    /// Note that this is provided for convenience, and the subclass is
    /// not required to use this and can still do tag handling on its own.
    ///
    /// MT safe.
    /// ## `tags`
    /// a [`gst::TagList`][crate::gst::TagList] to merge, or NULL to unset
    /// previously-set tags
    /// ## `mode`
    /// the [`gst::TagMergeMode`][crate::gst::TagMergeMode] to use, usually [`gst::TagMergeMode::Replace`][crate::gst::TagMergeMode::Replace]
    #[doc(alias = "gst_video_decoder_merge_tags")]
    fn merge_tags(&self, tags: Option<&gst::TagList>, mode: gst::TagMergeMode) {
        unsafe {
            // `tags` is borrowed; `Option` maps onto a nullable C pointer.
            ffi::gst_video_decoder_merge_tags(
                self.as_ref().to_glib_none().0,
                tags.to_glib_none().0,
                mode.into_glib(),
            );
        }
    }
562
    /// Returns caps that express `caps` (or sink template caps if `caps` == NULL)
    /// restricted to resolution/format/... combinations supported by downstream
    /// elements.
    /// ## `caps`
    /// initial caps
    /// ## `filter`
    /// filter caps
    ///
    /// # Returns
    ///
    /// a [`gst::Caps`][crate::gst::Caps] owned by caller
    #[doc(alias = "gst_video_decoder_proxy_getcaps")]
    fn proxy_getcaps(&self, caps: Option<&gst::Caps>, filter: Option<&gst::Caps>) -> gst::Caps {
        unsafe {
            // Inputs are borrowed (nullable); the returned caps ref is owned by
            // us via `from_glib_full`.
            from_glib_full(ffi::gst_video_decoder_proxy_getcaps(
                self.as_ref().to_glib_none().0,
                caps.to_glib_none().0,
                filter.to_glib_none().0,
            ))
        }
    }
584
    /// Similar to [`drop_frame()`][Self::drop_frame()], but simply releases `frame`
    /// without any processing other than removing it from list of pending frames,
    /// after which it is considered finished and released.
    /// ## `frame`
    /// the [`VideoCodecFrame`][crate::VideoCodecFrame] to release
    #[doc(alias = "gst_video_decoder_release_frame")]
    fn release_frame(&self, frame: VideoCodecFrame) {
        unsafe {
            // `frame` is consumed: its ref is transferred to the C function.
            ffi::gst_video_decoder_release_frame(
                self.as_ref().to_glib_none().0,
                frame.into_glib_ptr(),
            );
        }
    }
599
    /// Allows the [`VideoDecoder`][crate::VideoDecoder] subclass to request from the base class that
    /// a new sync should be requested from upstream, and that `frame` was the frame
    /// when the subclass noticed that a new sync point is required. A reason for
    /// the subclass to do this could be missing reference frames, for example.
    ///
    /// The base class will then request a new sync point from upstream as long as
    /// the time that passed since the last one is exceeding
    /// [`min-force-key-unit-interval`][struct@crate::VideoDecoder#min-force-key-unit-interval].
    ///
    /// The subclass can signal via `flags` how the frames until the next sync point
    /// should be handled:
    ///
    /// * If [`VideoDecoderRequestSyncPointFlags::DISCARD_INPUT`][crate::VideoDecoderRequestSyncPointFlags::DISCARD_INPUT] is selected then
    /// all following input frames until the next sync point are discarded.
    /// This can be useful if the lack of a sync point will prevent all further
    /// decoding and the decoder implementation is not very robust in handling
    /// missing references frames.
    /// * If [`VideoDecoderRequestSyncPointFlags::CORRUPT_OUTPUT`][crate::VideoDecoderRequestSyncPointFlags::CORRUPT_OUTPUT] is selected
    /// then all output frames following `frame` are marked as corrupted via
    /// `GST_BUFFER_FLAG_CORRUPTED`. Corrupted frames can be automatically
    /// dropped by the base class, see [`discard-corrupted-frames`][struct@crate::VideoDecoder#discard-corrupted-frames].
    /// Subclasses can manually mark frames as corrupted via [`VideoCodecFrameFlags::CORRUPTED`][crate::VideoCodecFrameFlags::CORRUPTED]
    /// before calling [`finish_frame()`][Self::finish_frame()].
    /// ## `frame`
    /// a [`VideoCodecFrame`][crate::VideoCodecFrame]
    /// ## `flags`
    /// [`VideoDecoderRequestSyncPointFlags`][crate::VideoDecoderRequestSyncPointFlags]
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "gst_video_decoder_request_sync_point")]
    fn request_sync_point(
        &self,
        frame: &VideoCodecFrame,
        flags: VideoDecoderRequestSyncPointFlags,
    ) {
        unsafe {
            // `frame` is borrowed; `flags` is passed by value as a C bitmask.
            ffi::gst_video_decoder_request_sync_point(
                self.as_ref().to_glib_none().0,
                frame.to_glib_none().0,
                flags.into_glib(),
            );
        }
    }
643
    /// Allows baseclass to perform byte to time estimated conversion.
    /// ## `enabled`
    /// whether to enable byte to time conversion
    #[doc(alias = "gst_video_decoder_set_estimate_rate")]
    fn set_estimate_rate(&self, enabled: bool) {
        unsafe {
            // bool -> gboolean conversion via `into_glib`.
            ffi::gst_video_decoder_set_estimate_rate(
                self.as_ref().to_glib_none().0,
                enabled.into_glib(),
            );
        }
    }
656
    /// Sets numbers of tolerated decoder errors, where a tolerated one is then only
    /// warned about, but more than tolerated will lead to fatal error. You can set
    /// -1 for never returning fatal errors. Default is set to
    /// GST_VIDEO_DECODER_MAX_ERRORS.
    ///
    /// The '-1' option was added in 1.4
    /// ## `num`
    /// max tolerated errors
    #[doc(alias = "gst_video_decoder_set_max_errors")]
    #[doc(alias = "max-errors")]
    fn set_max_errors(&self, num: i32) {
        unsafe {
            ffi::gst_video_decoder_set_max_errors(self.as_ref().to_glib_none().0, num);
        }
    }
672
    /// Configures decoder format needs. If enabled, subclass needs to be
    /// negotiated with format caps before it can process any data. It will then
    /// never be handed any data before it has been configured.
    /// Otherwise, it might be handed data without having been configured and
    /// is then expected being able to do so either by default
    /// or based on the input data.
    /// ## `enabled`
    /// new state
    #[doc(alias = "gst_video_decoder_set_needs_format")]
    fn set_needs_format(&self, enabled: bool) {
        unsafe {
            // bool -> gboolean conversion via `into_glib`.
            ffi::gst_video_decoder_set_needs_format(
                self.as_ref().to_glib_none().0,
                enabled.into_glib(),
            );
        }
    }
690
    /// Configures whether the decoder requires a sync point before it starts
    /// outputting data in the beginning. If enabled, the base class will discard
    /// all non-sync point frames in the beginning and after a flush and does not
    /// pass it to the subclass.
    ///
    /// If the first frame is not a sync point, the base class will request a sync
    /// point via the force-key-unit event.
    /// ## `enabled`
    /// new state
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "gst_video_decoder_set_needs_sync_point")]
    fn set_needs_sync_point(&self, enabled: bool) {
        unsafe {
            // bool -> gboolean conversion via `into_glib`.
            ffi::gst_video_decoder_set_needs_sync_point(
                self.as_ref().to_glib_none().0,
                enabled.into_glib(),
            );
        }
    }
711
    /// Allows baseclass to consider input data as packetized or not. If the
    /// input is packetized, then the `parse` method will not be called.
    /// ## `packetized`
    /// whether the input data should be considered as packetized.
    #[doc(alias = "gst_video_decoder_set_packetized")]
    fn set_packetized(&self, packetized: bool) {
        unsafe {
            // bool -> gboolean conversion via `into_glib`.
            ffi::gst_video_decoder_set_packetized(
                self.as_ref().to_glib_none().0,
                packetized.into_glib(),
            );
        }
    }
725
    /// If this is set to TRUE, it informs the base class that the subclass
    /// can receive the data at a granularity lower than one frame.
    ///
    /// Note that in this mode, the subclass has two options. It can either
    /// require the presence of a GST_VIDEO_BUFFER_FLAG_MARKER to mark the
    /// end of a frame. Or it can operate in such a way that it will decode
    /// a single frame at a time. In this second case, every buffer that
    /// arrives to the element is considered part of the same frame until
    /// [`finish_frame()`][Self::finish_frame()] is called.
    ///
    /// In either case, the same [`VideoCodecFrame`][crate::VideoCodecFrame] will be passed to the
    /// GstVideoDecoderClass:handle_frame vmethod repeatedly with a
    /// different GstVideoCodecFrame:input_buffer every time until the end of the
    /// frame has been signaled using either method.
    /// This method must be called during the decoder subclass `set_format` call.
    /// ## `subframe_mode`
    /// whether the input data should be considered as subframes.
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "gst_video_decoder_set_subframe_mode")]
    fn set_subframe_mode(&self, subframe_mode: bool) {
        unsafe {
            // bool -> gboolean conversion via `into_glib`.
            ffi::gst_video_decoder_set_subframe_mode(
                self.as_ref().to_glib_none().0,
                subframe_mode.into_glib(),
            );
        }
    }
754
    /// Lets [`VideoDecoder`][crate::VideoDecoder] sub-classes decide if they want the sink pad
    /// to use the default pad query handler to reply to accept-caps queries.
    ///
    /// By setting this to true it is possible to further customize the default
    /// handler with `GST_PAD_SET_ACCEPT_INTERSECT` and
    /// `GST_PAD_SET_ACCEPT_TEMPLATE`
    /// ## `use_`
    /// if the default pad accept-caps query handling should be used
    #[doc(alias = "gst_video_decoder_set_use_default_pad_acceptcaps")]
    fn set_use_default_pad_acceptcaps(&self, use_: bool) {
        unsafe {
            // bool -> gboolean conversion via `into_glib`.
            ffi::gst_video_decoder_set_use_default_pad_acceptcaps(
                self.as_ref().to_glib_none().0,
                use_.into_glib(),
            );
        }
    }
772
    /// GstVideoDecoderRequestSyncPointFlags to use for the automatically
    /// requested sync points if `automatic-request-sync-points` is enabled.
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "automatic-request-sync-point-flags")]
    fn automatic_request_sync_point_flags(&self) -> VideoDecoderRequestSyncPointFlags {
        // GObject property read; fully qualified to avoid trait-method ambiguity.
        ObjectExt::property(self.as_ref(), "automatic-request-sync-point-flags")
    }
781
    /// GstVideoDecoderRequestSyncPointFlags to use for the automatically
    /// requested sync points if `automatic-request-sync-points` is enabled.
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "automatic-request-sync-point-flags")]
    fn set_automatic_request_sync_point_flags(
        &self,
        automatic_request_sync_point_flags: VideoDecoderRequestSyncPointFlags,
    ) {
        // GObject property write; fully qualified to avoid trait-method ambiguity.
        ObjectExt::set_property(
            self.as_ref(),
            "automatic-request-sync-point-flags",
            automatic_request_sync_point_flags,
        )
    }
797
798 /// If set to [`true`] the decoder will automatically request sync points when
799 /// it seems like a good idea, e.g. if the first frames are not key frames or
800 /// if packet loss was reported by upstream.
801 #[cfg(feature = "v1_20")]
802 #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
803 #[doc(alias = "automatic-request-sync-points")]
804 fn is_automatic_request_sync_points(&self) -> bool {
805 ObjectExt::property(self.as_ref(), "automatic-request-sync-points")
806 }
807
808 /// If set to [`true`] the decoder will automatically request sync points when
809 /// it seems like a good idea, e.g. if the first frames are not key frames or
810 /// if packet loss was reported by upstream.
811 #[cfg(feature = "v1_20")]
812 #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
813 #[doc(alias = "automatic-request-sync-points")]
814 fn set_automatic_request_sync_points(&self, automatic_request_sync_points: bool) {
815 ObjectExt::set_property(
816 self.as_ref(),
817 "automatic-request-sync-points",
818 automatic_request_sync_points,
819 )
820 }
821
822 /// If set to [`true`] the decoder will discard frames that are marked as
823 /// corrupted instead of outputting them.
824 #[cfg(feature = "v1_20")]
825 #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
826 #[doc(alias = "discard-corrupted-frames")]
827 fn is_discard_corrupted_frames(&self) -> bool {
828 ObjectExt::property(self.as_ref(), "discard-corrupted-frames")
829 }
830
831 /// If set to [`true`] the decoder will discard frames that are marked as
832 /// corrupted instead of outputting them.
833 #[cfg(feature = "v1_20")]
834 #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
835 #[doc(alias = "discard-corrupted-frames")]
836 fn set_discard_corrupted_frames(&self, discard_corrupted_frames: bool) {
837 ObjectExt::set_property(
838 self.as_ref(),
839 "discard-corrupted-frames",
840 discard_corrupted_frames,
841 )
842 }
843
844 /// Minimum interval between force-key-unit events sent upstream by the
845 /// decoder. Setting this to 0 will cause every event to be handled, setting
846 /// this to `GST_CLOCK_TIME_NONE` will cause every event to be ignored.
847 ///
848 /// See `gst_video_event_new_upstream_force_key_unit()` for more details about
849 /// force-key-unit events.
850 #[cfg(feature = "v1_20")]
851 #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
852 #[doc(alias = "min-force-key-unit-interval")]
853 fn min_force_key_unit_interval(&self) -> u64 {
854 ObjectExt::property(self.as_ref(), "min-force-key-unit-interval")
855 }
856
857 /// Minimum interval between force-key-unit events sent upstream by the
858 /// decoder. Setting this to 0 will cause every event to be handled, setting
859 /// this to `GST_CLOCK_TIME_NONE` will cause every event to be ignored.
860 ///
861 /// See `gst_video_event_new_upstream_force_key_unit()` for more details about
862 /// force-key-unit events.
863 #[cfg(feature = "v1_20")]
864 #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
865 #[doc(alias = "min-force-key-unit-interval")]
866 fn set_min_force_key_unit_interval(&self, min_force_key_unit_interval: u64) {
867 ObjectExt::set_property(
868 self.as_ref(),
869 "min-force-key-unit-interval",
870 min_force_key_unit_interval,
871 )
872 }
873
874 /// If set to [`true`] the decoder will handle QoS events received
875 /// from downstream elements.
876 /// This includes dropping output frames which are detected as late
877 /// using the metrics reported by those events.
878 #[cfg(feature = "v1_18")]
879 #[cfg_attr(docsrs, doc(cfg(feature = "v1_18")))]
880 fn is_qos(&self) -> bool {
881 ObjectExt::property(self.as_ref(), "qos")
882 }
883
884 /// If set to [`true`] the decoder will handle QoS events received
885 /// from downstream elements.
886 /// This includes dropping output frames which are detected as late
887 /// using the metrics reported by those events.
888 #[cfg(feature = "v1_18")]
889 #[cfg_attr(docsrs, doc(cfg(feature = "v1_18")))]
890 fn set_qos(&self, qos: bool) {
891 ObjectExt::set_property(self.as_ref(), "qos", qos)
892 }
893
    /// Connects a handler to the `notify::automatic-request-sync-point-flags`
    /// signal, invoked whenever that property changes.
    ///
    /// Returns a `SignalHandlerId` that can be used to disconnect the handler.
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "automatic-request-sync-point-flags")]
    fn connect_automatic_request_sync_point_flags_notify<F: Fn(&Self) + Send + Sync + 'static>(
        &self,
        f: F,
    ) -> SignalHandlerId {
        // C-callable shim: recovers the boxed Rust closure from the user-data
        // pointer and calls it with the emitting object.
        unsafe extern "C" fn notify_automatic_request_sync_point_flags_trampoline<
            P: IsA<VideoDecoder>,
            F: Fn(&P) + Send + Sync + 'static,
        >(
            this: *mut ffi::GstVideoDecoder,
            _param_spec: glib::ffi::gpointer,
            f: glib::ffi::gpointer,
        ) {
            // SAFETY: `f` is the `Box<F>` leaked via `Box_::into_raw` below and
            // stays alive while the handler is connected.
            let f: &F = &*(f as *const F);
            f(VideoDecoder::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            // Box the closure and hand ownership to GLib as the handler's user data.
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"notify::automatic-request-sync-point-flags\0".as_ptr() as *const _,
                // Cast the typed trampoline to the generic callback type expected
                // by `connect_raw`.
                Some(std::mem::transmute::<*const (), unsafe extern "C" fn()>(
                    notify_automatic_request_sync_point_flags_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
924
    /// Connects a handler to the `notify::automatic-request-sync-points`
    /// signal, invoked whenever that property changes.
    ///
    /// Returns a `SignalHandlerId` that can be used to disconnect the handler.
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "automatic-request-sync-points")]
    fn connect_automatic_request_sync_points_notify<F: Fn(&Self) + Send + Sync + 'static>(
        &self,
        f: F,
    ) -> SignalHandlerId {
        // C-callable shim: recovers the boxed Rust closure from the user-data
        // pointer and calls it with the emitting object.
        unsafe extern "C" fn notify_automatic_request_sync_points_trampoline<
            P: IsA<VideoDecoder>,
            F: Fn(&P) + Send + Sync + 'static,
        >(
            this: *mut ffi::GstVideoDecoder,
            _param_spec: glib::ffi::gpointer,
            f: glib::ffi::gpointer,
        ) {
            // SAFETY: `f` is the `Box<F>` leaked via `Box_::into_raw` below and
            // stays alive while the handler is connected.
            let f: &F = &*(f as *const F);
            f(VideoDecoder::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            // Box the closure and hand ownership to GLib as the handler's user data.
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"notify::automatic-request-sync-points\0".as_ptr() as *const _,
                // Cast the typed trampoline to the generic callback type expected
                // by `connect_raw`.
                Some(std::mem::transmute::<*const (), unsafe extern "C" fn()>(
                    notify_automatic_request_sync_points_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
955
    /// Connects a handler to the `notify::discard-corrupted-frames` signal,
    /// invoked whenever that property changes.
    ///
    /// Returns a `SignalHandlerId` that can be used to disconnect the handler.
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "discard-corrupted-frames")]
    fn connect_discard_corrupted_frames_notify<F: Fn(&Self) + Send + Sync + 'static>(
        &self,
        f: F,
    ) -> SignalHandlerId {
        // C-callable shim: recovers the boxed Rust closure from the user-data
        // pointer and calls it with the emitting object.
        unsafe extern "C" fn notify_discard_corrupted_frames_trampoline<
            P: IsA<VideoDecoder>,
            F: Fn(&P) + Send + Sync + 'static,
        >(
            this: *mut ffi::GstVideoDecoder,
            _param_spec: glib::ffi::gpointer,
            f: glib::ffi::gpointer,
        ) {
            // SAFETY: `f` is the `Box<F>` leaked via `Box_::into_raw` below and
            // stays alive while the handler is connected.
            let f: &F = &*(f as *const F);
            f(VideoDecoder::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            // Box the closure and hand ownership to GLib as the handler's user data.
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"notify::discard-corrupted-frames\0".as_ptr() as *const _,
                // Cast the typed trampoline to the generic callback type expected
                // by `connect_raw`.
                Some(std::mem::transmute::<*const (), unsafe extern "C" fn()>(
                    notify_discard_corrupted_frames_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
986
    /// Connects a handler to the `notify::max-errors` signal, invoked whenever
    /// that property changes.
    ///
    /// Returns a `SignalHandlerId` that can be used to disconnect the handler.
    #[cfg(feature = "v1_18")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_18")))]
    #[doc(alias = "max-errors")]
    fn connect_max_errors_notify<F: Fn(&Self) + Send + Sync + 'static>(
        &self,
        f: F,
    ) -> SignalHandlerId {
        // C-callable shim: recovers the boxed Rust closure from the user-data
        // pointer and calls it with the emitting object.
        unsafe extern "C" fn notify_max_errors_trampoline<
            P: IsA<VideoDecoder>,
            F: Fn(&P) + Send + Sync + 'static,
        >(
            this: *mut ffi::GstVideoDecoder,
            _param_spec: glib::ffi::gpointer,
            f: glib::ffi::gpointer,
        ) {
            // SAFETY: `f` is the `Box<F>` leaked via `Box_::into_raw` below and
            // stays alive while the handler is connected.
            let f: &F = &*(f as *const F);
            f(VideoDecoder::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            // Box the closure and hand ownership to GLib as the handler's user data.
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"notify::max-errors\0".as_ptr() as *const _,
                // Cast the typed trampoline to the generic callback type expected
                // by `connect_raw`.
                Some(std::mem::transmute::<*const (), unsafe extern "C" fn()>(
                    notify_max_errors_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
1017
    /// Connects a handler to the `notify::min-force-key-unit-interval` signal,
    /// invoked whenever that property changes.
    ///
    /// Returns a `SignalHandlerId` that can be used to disconnect the handler.
    #[cfg(feature = "v1_20")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_20")))]
    #[doc(alias = "min-force-key-unit-interval")]
    fn connect_min_force_key_unit_interval_notify<F: Fn(&Self) + Send + Sync + 'static>(
        &self,
        f: F,
    ) -> SignalHandlerId {
        // C-callable shim: recovers the boxed Rust closure from the user-data
        // pointer and calls it with the emitting object.
        unsafe extern "C" fn notify_min_force_key_unit_interval_trampoline<
            P: IsA<VideoDecoder>,
            F: Fn(&P) + Send + Sync + 'static,
        >(
            this: *mut ffi::GstVideoDecoder,
            _param_spec: glib::ffi::gpointer,
            f: glib::ffi::gpointer,
        ) {
            // SAFETY: `f` is the `Box<F>` leaked via `Box_::into_raw` below and
            // stays alive while the handler is connected.
            let f: &F = &*(f as *const F);
            f(VideoDecoder::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            // Box the closure and hand ownership to GLib as the handler's user data.
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"notify::min-force-key-unit-interval\0".as_ptr() as *const _,
                // Cast the typed trampoline to the generic callback type expected
                // by `connect_raw`.
                Some(std::mem::transmute::<*const (), unsafe extern "C" fn()>(
                    notify_min_force_key_unit_interval_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
1048
    /// Connects a handler to the `notify::qos` signal, invoked whenever the
    /// `qos` property changes.
    ///
    /// Returns a `SignalHandlerId` that can be used to disconnect the handler.
    #[cfg(feature = "v1_18")]
    #[cfg_attr(docsrs, doc(cfg(feature = "v1_18")))]
    #[doc(alias = "qos")]
    fn connect_qos_notify<F: Fn(&Self) + Send + Sync + 'static>(&self, f: F) -> SignalHandlerId {
        // C-callable shim: recovers the boxed Rust closure from the user-data
        // pointer and calls it with the emitting object.
        unsafe extern "C" fn notify_qos_trampoline<
            P: IsA<VideoDecoder>,
            F: Fn(&P) + Send + Sync + 'static,
        >(
            this: *mut ffi::GstVideoDecoder,
            _param_spec: glib::ffi::gpointer,
            f: glib::ffi::gpointer,
        ) {
            // SAFETY: `f` is the `Box<F>` leaked via `Box_::into_raw` below and
            // stays alive while the handler is connected.
            let f: &F = &*(f as *const F);
            f(VideoDecoder::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            // Box the closure and hand ownership to GLib as the handler's user data.
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"notify::qos\0".as_ptr() as *const _,
                // Cast the typed trampoline to the generic callback type expected
                // by `connect_raw`.
                Some(std::mem::transmute::<*const (), unsafe extern "C" fn()>(
                    notify_qos_trampoline::<Self, F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
1076}
1077
// Blanket implementation: any type that can be cast to `VideoDecoder`
// automatically gains the `VideoDecoderExt` methods.
impl<O: IsA<VideoDecoder>> VideoDecoderExt for O {}