Documentation
Index ¶
- Constants
- func Debug(state bool)
- func HydrateMediaObject(id string, parent IMediaObject, c *Connection, elem IMediaObject) error
- type AlphaBlending
- type AudioCaps
- type AudioCodec
- type BaseRtpEndpoint
- type CertificateKeyType
- type CodecConfiguration
- type Composite
- type Connection
- func (c *Connection) Close() error
- func (c *Connection) Create(m IMediaObject, options map[string]interface{}) error
- func (c *Connection) Request(req map[string]interface{}) <-chan Response
- func (c *Connection) Subscribe(event, objectId, handlerId string, handler eventHandler)
- func (c *Connection) Unsubscribe(event, objectId, handlerId string)
- type ConnectionState
- type CryptoSuite
- type DSCPValue
- type Dispatcher
- type DispatcherOneToMany
- type ElementConnectionData
- type ElementStats
- type Endpoint
- type EndpointStats
- type Error
- type Event
- type Filter
- type FilterType
- type Fraction
- type GapsFixMethod
- type GstreamerDotDetails
- type HttpEndpoint
- type HttpPostEndpoint
- type Hub
- type HubPort
- type IAlphaBlending
- type IBaseRtpEndpoint
- type IComposite
- type ICustomSerializer
- type IDispatcher
- type IDispatcherOneToMany
- type IEndpoint
- type IFilter
- type IHttpEndpoint
- type IHttpPostEndpoint
- type IHub
- type IHubPort
- type IMediaElement
- type IMediaObject
- type IMediaPipeline
- type IMixer
- type IPassThrough
- type IPlayerEndpoint
- type IRecorderEndpoint
- type IRtpEndpoint
- type ISdpEndpoint
- type IServerManager
- type ISessionEndpoint
- type IUriEndpoint
- type IWebRtcEndpoint
- type IceCandidate
- type IceCandidatePair
- type IceComponentState
- type IceConnection
- type MediaElement
- func (elem *MediaElement) Connect(sink IMediaElement, mediaType MediaType, sourceMediaDescription string, ...) error
- func (elem *MediaElement) Disconnect(sink IMediaElement, mediaType MediaType, sourceMediaDescription string, ...) error
- func (elem *MediaElement) GetGstreamerDot(details GstreamerDotDetails) (string, error)
- func (elem *MediaElement) GetSinkConnections(mediaType MediaType, description string) ([]ElementConnectionData, error)
- func (elem *MediaElement) GetSourceConnections(mediaType MediaType, description string) ([]ElementConnectionData, error)
- func (elem *MediaElement) GetStats(mediaType MediaType) (map[string]Stats, error)
- func (elem *MediaElement) IsMediaFlowingIn(mediaType MediaType, sinkMediaDescription string) (bool, error)
- func (elem *MediaElement) IsMediaFlowingOut(mediaType MediaType, sourceMediaDescription string) (bool, error)
- func (elem *MediaElement) IsMediaTranscoding(mediaType MediaType, binName string) (bool, error)
- func (elem *MediaElement) SetAudioFormat(caps AudioCaps) error
- func (elem *MediaElement) SetOutputBitrate(bitrate int) error
- func (elem *MediaElement) SetVideoFormat(caps VideoCaps) error
- type MediaFlowState
- type MediaLatencyStat
- type MediaObject
- func (elem *MediaObject) AddTag(key string, value string) error
- func (elem *MediaObject) Create(m IMediaObject, options map[string]interface{}) error
- func (elem *MediaObject) GetTag(key string) (string, error)
- func (elem *MediaObject) GetTags() ([]Tag, error)
- func (elem *MediaObject) Release() error
- func (elem *MediaObject) RemoveTag(key string) error
- func (m *MediaObject) String() string
- func (elem *MediaObject) Subscribe(event string, cb eventHandler) (string, error)
- type MediaPipeline
- type MediaProfileSpecType
- type MediaState
- type MediaTranscodingState
- type MediaType
- type Mixer
- type ModuleInfo
- type OfferOptions
- type PassThrough
- type PlayerEndpoint
- type RTCCertificateStats
- type RTCCodec
- type RTCDataChannelState
- type RTCDataChannelStats
- type RTCIceCandidateAttributes
- type RTCIceCandidatePairStats
- type RTCInboundRTPStreamStats
- type RTCMediaStreamStats
- type RTCMediaStreamTrackStats
- type RTCOutboundRTPStreamStats
- type RTCPeerConnectionStats
- type RTCRTPStreamStats
- type RTCStats
- type RTCStatsIceCandidatePairState
- type RTCStatsIceCandidateType
- type RTCTransportStats
- type RecorderEndpoint
- type RembParams
- type Response
- type RtpEndpoint
- type SDES
- type SdpEndpoint
- func (elem *SdpEndpoint) GenerateOffer(options OfferOptions) (string, error)
- func (elem *SdpEndpoint) GetLocalSessionDescriptor() (string, error)
- func (elem *SdpEndpoint) GetRemoteSessionDescriptor() (string, error)
- func (elem *SdpEndpoint) ProcessAnswer(answer string) (string, error)
- func (elem *SdpEndpoint) ProcessOffer(offer string) (string, error)
- type ServerInfo
- type ServerManager
- type ServerType
- type SessionEndpoint
- type Stats
- type StatsType
- type Tag
- type UriEndpoint
- type UriEndpointState
- type VideoCaps
- type VideoCodec
- type VideoInfo
- type WebRtcEndpoint
- func (elem *WebRtcEndpoint) AddIceCandidate(candidate IceCandidate) error
- func (elem *WebRtcEndpoint) CloseDataChannel(channelId int) error
- func (elem *WebRtcEndpoint) CreateDataChannel(label string, ordered bool, maxPacketLifeTime int, maxRetransmits int, ...) error
- func (elem *WebRtcEndpoint) GatherCandidates() error
- Bugs
Constants ¶
const ConnectionLost = -1
Variables ¶
This section is empty.
Functions ¶
func HydrateMediaObject ¶
func HydrateMediaObject(id string, parent IMediaObject, c *Connection, elem IMediaObject) error
Creates an in-memory object that represents a remote object, without creating the remote object on the server.
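For example, a client that reconnects to a running media server can attach a local proxy to an already-existing remote object. A minimal sketch, assuming the package is imported as kurento, a live *Connection named conn, and a known remote pipeline ID (all assumptions for illustration); passing nil as the parent matches a MediaPipeline having no parent, per the MediaObject documentation:

    // Attach a local proxy to an existing remote MediaPipeline, identified
    // by its server-side ID, without creating anything on the server.
    // pipelineID and conn are assumed to exist already.
    pipeline := &kurento.MediaPipeline{}
    if err := kurento.HydrateMediaObject(pipelineID, nil, conn, pipeline); err != nil {
        log.Fatal(err)
    }
    // pipeline can now be used like any created object: pipeline.Create(...), etc.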
Types ¶
type AlphaBlending ¶
type AlphaBlending struct {
Hub
}
A `Hub` that mixes the MediaType.AUDIO streams of its connected sources and blends the MediaType.VIDEO streams of its connected sources into one output at its sink
func (*AlphaBlending) SetMaster ¶
func (elem *AlphaBlending) SetMaster(source HubPort, zOrder int) error
Sets the source port that will be the master entry to the mixer
func (*AlphaBlending) SetPortProperties ¶
func (elem *AlphaBlending) SetPortProperties(relativeX float64, relativeY float64, zOrder int, relativeWidth float64, relativeHeight float64, port HubPort) error
Configure the blending mode of one port.
type AudioCaps ¶
type AudioCaps struct {
    Codec   AudioCodec
    Bitrate int
}
type AudioCodec ¶
type AudioCodec string
Codec used for transmission of audio.
const (
    AUDIOCODEC_OPUS AudioCodec = "OPUS"
    AUDIOCODEC_PCMU AudioCodec = "PCMU"
    AUDIOCODEC_RAW  AudioCodec = "RAW"
)
type BaseRtpEndpoint ¶
type BaseRtpEndpoint struct {
    SdpEndpoint

    // Minimum input bitrate, requested from WebRTC senders with REMB.
    // This is used to set a minimum value of local REMB during bandwidth
    // estimation, if supported by the implementing class. The REMB estimation
    // will then be sent to remote peers, requesting them to send at least the
    // indicated video bitrate. It follows that min values will only have
    // effect in remote peers that support this congestion control mechanism,
    // such as Chrome.
    //   - Unit: kbps (kilobits per second).
    //   - Default: 0.
    //   - Note: the absolute minimum REMB value is 30 kbps, even if a lower
    //     value is set here.
    MinVideoRecvBandwidth int

    // REMB override of the minimum bitrate sent to WebRTC receivers.
    // With this parameter you can control the minimum video quality that will
    // be sent when reacting to bad network conditions. Setting this parameter
    // to a low value permits the video quality to drop when the network
    // conditions get worse. It provides a way to override the bitrate
    // requested by remote REMB bandwidth estimations: the bitrate sent will
    // always be equal to or greater than this parameter, even if the remote
    // peer requests even lower bitrates. Note that if you set this parameter
    // too high (trying to avoid bad video quality altogether), you would be
    // limiting the adaptation ability of the congestion control algorithm,
    // and your stream might be unable to ever recover from adverse network
    // conditions.
    //   - Unit: kbps (kilobits per second).
    //   - Default: 100.
    //   - 0 = unlimited: the video bitrate will drop as needed, even to the
    //     lowest possible quality, which might make the video completely
    //     blurry and pixelated.
    MinVideoSendBandwidth int

    // REMB override of the maximum bitrate sent to WebRTC receivers.
    // With this parameter you can control the maximum video quality that will
    // be sent when reacting to good network conditions. Setting this
    // parameter to a high value permits the video quality to rise when the
    // network conditions get better. It provides a way to limit the bitrate
    // requested by remote REMB bandwidth estimations: the bitrate sent will
    // always be equal to or less than this parameter, even if the remote peer
    // requests higher bitrates. Note that the default value of 500 kbps is a
    // VERY conservative one, and leads to a low maximum video quality; most
    // applications will probably want to increase this to higher values such
    // as 2000 kbps (2 Mbps). The REMB congestion control algorithm works by
    // gradually increasing the output video bitrate, until the available
    // bandwidth is fully used or the maximum send bitrate has been reached.
    // This is a slow, progressive change, which starts at 300 kbps by
    // default. You can change the default starting point of REMB estimations
    // by setting RembParams.rembOnConnect.
    //   - Unit: kbps (kilobits per second).
    //   - Default: 500.
    //   - 0 = unlimited: the video bitrate will grow until all the available
    //     network bandwidth is used by the stream. Note that this might have
    //     a bad effect if more than one stream is running, as all of them
    //     would try to raise the video bitrate indefinitely until the network
    //     gets saturated.
    MaxVideoSendBandwidth int

    // Media flow state.
    //   - CONNECTED: there is an RTCP flow.
    //   - DISCONNECTED: no RTCP packets have been received for at least 5
    //     seconds.
    MediaState *MediaState

    // Connection state: CONNECTED or DISCONNECTED.
    ConnectionState *ConnectionState

    // Maximum Transmission Unit (MTU) used for RTP.
    // This setting affects the maximum size that will be used by RTP
    // payloads. You can change it from the default if you think a different
    // value would be beneficial for the typical network settings of your
    // application. The default value is 1200 bytes, the same as in libwebrtc
    // (from webrtc.org), as used by Firefox
    // (https://dxr.mozilla.org/mozilla-central/rev/b5c5ba07d3dbd0d07b66fa42a103f4df2c27d3a2/media/webrtc/trunk/webrtc/media/engine/constants.cc#16)
    // or Chrome
    // (https://source.chromium.org/chromium/external/webrtc/src/+/6dd488b2e55125644263e4837f1abd950d5e410d:media/engine/constants.cc;l=15).
    // You can read more about this value in "Why RTP max packet size is 1200
    // in WebRTC?"
    // (https://groups.google.com/d/topic/discuss-webrtc/gH5ysR3SoZI/discussion).
    // WARNING: change this value ONLY if you really know what you are doing
    // and you have strong reasons to do so. Do NOT change this parameter just
    // because it seems to work better in some reduced-scope tests. The
    // default value is a consensus chosen by people who have deep knowledge
    // about network optimization.
    //   - Unit: bytes.
    //   - Default: 1200.
    Mtu int

    // Advanced parameters to configure the congestion control algorithm.
    RembParams *RembParams
}
Handles RTP communications. All endpoints that rely on the RTP protocol, like the RtpEndpoint or the WebRtcEndpoint, inherit from this class. The endpoint provides information about the Connection state and the Media state, which can be consulted at any time through the getMediaState and getConnectionState methods. It is also possible to subscribe to the events fired when these properties change:

- ConnectionStateChanged: raised when the connection between two peers changes. It can have two values: CONNECTED and DISCONNECTED.
- MediaStateChanged: provides information about the state of the underlying RTP session. Possible values:
  - CONNECTED: there is an RTCP packet flow between peers.
  - DISCONNECTED: either no RTCP packets have been received yet, or the remote peer has ended the RTP session with a BYE message, or at least 5 seconds have elapsed since the last RTCP packet was received.

  The standard definition of RTP (RFC 3550, https://tools.ietf.org/html/rfc3550) describes a session as active whenever there is a maintained flow of RTCP control packets, regardless of whether there is actual media flowing through RTP data packets or not. The reasoning behind this is that, at any given moment, a participant of an RTP session might temporarily stop sending RTP data packets, but this wouldn't necessarily mean that the RTP session as a whole is finished; maybe it just means that the participant has some temporary issues and will soon resume sending data. For this reason, an RTP session is considered to have really finished only after a prolonged absence of RTCP control packets between participants.

  Since RTCP packets do not flow at a constant rate (for instance, minimizing a browser window with a WebRTC RTCPeerConnection object might affect the sending interval), it is not possible to immediately detect their absence and assume that the RTP session has finished. Instead, there is a guard period of approximately 5 seconds of missing RTCP packets before considering that the underlying RTP session is effectively finished, thus triggering a MediaStateChangedEvent = DISCONNECTED event.

  In other words, there is always a period during which there might be no media flowing, but this event hasn't been fired yet. Nevertheless, this is the most reliable and useful way of knowing the long-term, steady state of RTP media exchange.

  The ConnectionStateChanged event comes in contrast with more instantaneous events, such as MediaElement's MediaFlowInStateChanged and MediaFlowOutStateChanged, which are triggered almost immediately after the RTP data packets stop flowing between RTP session participants. This makes the MediaFlow events a good way to know if participants are suffering from short-term intermittent connectivity issues, but they are not enough to know whether the connectivity issues are just spurious network hiccups or part of a longer-term disconnection problem.

Part of the bandwidth control for the video component of the media session is done here:

- Input bandwidth: values used to inform remote peers about the bitrate that can be sent to this endpoint.
  - MinVideoRecvBandwidth: minimum input bitrate, requested from WebRTC senders with REMB (default: 30 kbps).
  - MaxAudioRecvBandwidth and MaxVideoRecvBandwidth: maximum input bitrate, signaled in SDP Offers to WebRTC and RTP senders (default: unlimited).
- Output bandwidth: values used to control the bitrate of the video streams sent to remote peers. It is important to keep in mind that the pushed bitrate depends on network and remote peer capabilities. Remote peers can also announce bandwidth limitations in their SDPs (through the b={modifier}:{value} attribute); Kurento will always enforce bitrate limitations specified by the remote peer over internal configurations.
  - MinVideoSendBandwidth: REMB override of the minimum bitrate sent to WebRTC receivers (default: 100 kbps).
  - MaxVideoSendBandwidth: REMB override of the maximum bitrate sent to WebRTC receivers (default: 500 kbps).
  - RembParams.rembOnConnect: initial local REMB bandwidth estimation that gets propagated when a new endpoint is connected.

All bandwidth control parameters must be changed before the SDP negotiation takes place, and can't be changed afterwards.
type CertificateKeyType ¶
type CertificateKeyType string
Type of certificate key: RSA or ECDSA.
const (
    CERTIFICATEKEYTYPE_RSA   CertificateKeyType = "RSA"
    CERTIFICATEKEYTYPE_ECDSA CertificateKeyType = "ECDSA"
)
func (CertificateKeyType) String ¶
func (t CertificateKeyType) String() string
Implement fmt.Stringer interface
type CodecConfiguration ¶
type Composite ¶
type Composite struct {
Hub
}
A `Hub` that mixes the MediaType.AUDIO streams of its connected sources and constructs a grid with the MediaType.VIDEO streams of its connected sources into its sink
type Connection ¶
type Connection struct {
    SessionId string
    Dead      chan bool
    IsDead    bool
    // contains filtered or unexported fields
}
func NewConnection ¶
func NewConnection(host string) (*Connection, error)
func (*Connection) Close ¶
func (c *Connection) Close() error
func (*Connection) Create ¶
func (c *Connection) Create(m IMediaObject, options map[string]interface{}) error
func (*Connection) Request ¶
func (c *Connection) Request(req map[string]interface{}) <-chan Response
func (*Connection) Subscribe ¶
func (c *Connection) Subscribe(event, objectId, handlerId string, handler eventHandler)
func (*Connection) Unsubscribe ¶
func (c *Connection) Unsubscribe(event, objectId, handlerId string)
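A minimal end-to-end sketch of the Connection API documented above: open a connection, create a pipeline and a child element, then release the resources. The WebSocket URL and the import path are assumptions; adjust them to your deployment.

    package main

    import (
        "log"

        kurento "github.com/kurento/kurento-go" // import path is an assumption
    )

    func main() {
        // Open a JSON-RPC over WebSocket connection to Kurento Media Server.
        conn, err := kurento.NewConnection("ws://localhost:8888/kurento")
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        // Create a remote MediaPipeline through the connection.
        pipeline := &kurento.MediaPipeline{}
        if err := conn.Create(pipeline, nil); err != nil {
            log.Fatal(err)
        }

        // Child elements are created through their parent object.
        webrtc := &kurento.WebRtcEndpoint{}
        if err := pipeline.Create(webrtc, nil); err != nil {
            log.Fatal(err)
        }

        // Release the server-side resources when done.
        if err := pipeline.Release(); err != nil {
            log.Fatal(err)
        }
    }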
type ConnectionState ¶
type ConnectionState string
State of the connection.
const (
    CONNECTIONSTATE_DISCONNECTED ConnectionState = "DISCONNECTED"
    CONNECTIONSTATE_CONNECTED    ConnectionState = "CONNECTED"
)
func (ConnectionState) String ¶
func (t ConnectionState) String() string
Implement fmt.Stringer interface
type CryptoSuite ¶
type CryptoSuite string
Describes the encryption and authentication algorithms
const (
    CRYPTOSUITE_AES_128_CM_HMAC_SHA1_32 CryptoSuite = "AES_128_CM_HMAC_SHA1_32"
    CRYPTOSUITE_AES_128_CM_HMAC_SHA1_80 CryptoSuite = "AES_128_CM_HMAC_SHA1_80"
    CRYPTOSUITE_AES_256_CM_HMAC_SHA1_32 CryptoSuite = "AES_256_CM_HMAC_SHA1_32"
    CRYPTOSUITE_AES_256_CM_HMAC_SHA1_80 CryptoSuite = "AES_256_CM_HMAC_SHA1_80"
)
type DSCPValue ¶
type DSCPValue string
Possible DSCP values.

WebRTC recommended values are taken from RFC 8837 (https://datatracker.ietf.org/doc/html/rfc8837#section-5); these are the values from AUDIO_VERYLOW to DATA_HIGH. The first element in the name indicates the kind of traffic it should apply to, and the second indicates its relative priority. For video, a third field indicates whether the traffic is intended for high throughput or not. As indicated in the RFC 8837 section 5 diagram:
    +-----------------------+----------+--------+---------------------+---------------------+
    | Flow Type             | Very Low | Low    | Medium              | High                |
    +-----------------------+----------+--------+---------------------+---------------------+
    | Audio                 | LE (1)   | DF (0) | EF (46)             | EF (46)             |
    +-----------------------+----------+--------+---------------------+---------------------+
    | Interactive Video     | LE (1)   | DF (0) | AF42, AF43 (36, 38) | AF41, AF42 (34, 36) |
    | with or without Audio |          |        |                     |                     |
    +-----------------------+----------+--------+---------------------+---------------------+
    | Non-Interactive Video | LE (1)   | DF (0) | AF32, AF33 (28, 30) | AF31, AF32 (26, 28) |
    | with or without Audio |          |        |                     |                     |
    +-----------------------+----------+--------+---------------------+---------------------+
    | Data                  | LE (1)   | DF (0) | AF11                | AF21                |
    +-----------------------+----------+--------+---------------------+---------------------+
As also indicated in the RFC, non-interactive video is not considered.

Apart from the WebRTC recommended values, we also include all possible values referenced in http://www.iana.org/assignments/dscp-registry/dscp-registry.xml; some of those values are synonyms for the WebRTC recommended ones, and are included mainly for completeness.

Finally, we include a shorthand for the Chrome-supported markings for low (CS0), very low (CS1), medium (CS7) and high (CS7) priorities in the priority property for RTCRtpSender parameters; see https://developer.mozilla.org/en-US/docs/Web/API/RTCRtpSender/setParameters

This only covers outgoing network packets from KMS. To complete the solution, DSCP must also be requested on the client; unfortunately, for traffic in the other direction, this must be requested from the browser or client. On the browser, the client application needs to use the following API: https://www.w3.org/TR/webrtc-priority/
const (
    DSCPVALUE_NO_DSCP                 DSCPValue = "NO_DSCP"
    DSCPVALUE_NO_VALUE                DSCPValue = "NO_VALUE"
    DSCPVALUE_AUDIO_VERYLOW           DSCPValue = "AUDIO_VERYLOW"
    DSCPVALUE_AUDIO_LOW               DSCPValue = "AUDIO_LOW"
    DSCPVALUE_AUDIO_MEDIUM            DSCPValue = "AUDIO_MEDIUM"
    DSCPVALUE_AUDIO_HIGH              DSCPValue = "AUDIO_HIGH"
    DSCPVALUE_VIDEO_VERYLOW           DSCPValue = "VIDEO_VERYLOW"
    DSCPVALUE_VIDEO_LOW               DSCPValue = "VIDEO_LOW"
    DSCPVALUE_VIDEO_MEDIUM            DSCPValue = "VIDEO_MEDIUM"
    DSCPVALUE_VIDEO_MEDIUM_THROUGHPUT DSCPValue = "VIDEO_MEDIUM_THROUGHPUT"
    DSCPVALUE_VIDEO_HIGH              DSCPValue = "VIDEO_HIGH"
    DSCPVALUE_VIDEO_HIGH_THROUGHPUT   DSCPValue = "VIDEO_HIGH_THROUGHPUT"
    DSCPVALUE_DATA_VERYLOW            DSCPValue = "DATA_VERYLOW"
    DSCPVALUE_DATA_LOW                DSCPValue = "DATA_LOW"
    DSCPVALUE_DATA_MEDIUM             DSCPValue = "DATA_MEDIUM"
    DSCPVALUE_DATA_HIGH               DSCPValue = "DATA_HIGH"
    DSCPVALUE_CHROME_HIGH             DSCPValue = "CHROME_HIGH"
    DSCPVALUE_CHROME_MEDIUM           DSCPValue = "CHROME_MEDIUM"
    DSCPVALUE_CHROME_LOW              DSCPValue = "CHROME_LOW"
    DSCPVALUE_CHROME_VERYLOW          DSCPValue = "CHROME_VERYLOW"
    DSCPVALUE_CS0                     DSCPValue = "CS0"
    DSCPVALUE_CS1                     DSCPValue = "CS1"
    DSCPVALUE_CS2                     DSCPValue = "CS2"
    DSCPVALUE_CS3                     DSCPValue = "CS3"
    DSCPVALUE_CS4                     DSCPValue = "CS4"
    DSCPVALUE_CS5                     DSCPValue = "CS5"
    DSCPVALUE_CS6                     DSCPValue = "CS6"
    DSCPVALUE_CS7                     DSCPValue = "CS7"
    DSCPVALUE_AF11                    DSCPValue = "AF11"
    DSCPVALUE_AF12                    DSCPValue = "AF12"
    DSCPVALUE_AF13                    DSCPValue = "AF13"
    DSCPVALUE_AF21                    DSCPValue = "AF21"
    DSCPVALUE_AF22                    DSCPValue = "AF22"
    DSCPVALUE_AF23                    DSCPValue = "AF23"
    DSCPVALUE_AF31                    DSCPValue = "AF31"
    DSCPVALUE_AF32                    DSCPValue = "AF32"
    DSCPVALUE_AF33                    DSCPValue = "AF33"
    DSCPVALUE_AF41                    DSCPValue = "AF41"
    DSCPVALUE_AF42                    DSCPValue = "AF42"
    DSCPVALUE_AF43                    DSCPValue = "AF43"
    DSCPVALUE_EF                      DSCPValue = "EF"
    DSCPVALUE_VOICEADMIT              DSCPValue = "VOICEADMIT"
    DSCPVALUE_LE                      DSCPValue = "LE"
)
type Dispatcher ¶
type Dispatcher struct {
Hub
}
A `Hub` that allows routing between arbitrary port pairs
type DispatcherOneToMany ¶
type DispatcherOneToMany struct {
Hub
}
A `Hub` that sends a given source to all the connected sinks
func (*DispatcherOneToMany) RemoveSource ¶
func (elem *DispatcherOneToMany) RemoveSource() error
Remove the source port and stop the media pipeline.
func (*DispatcherOneToMany) SetSource ¶
func (elem *DispatcherOneToMany) SetSource(source HubPort) error
Sets the source port that will be connected to the sinks of every `HubPort` of the dispatcher
type ElementConnectionData ¶
type ElementConnectionData struct {
    Source            MediaElement
    Sink              MediaElement
    Type              MediaType
    SourceDescription string
    SinkDescription   string
}
type ElementStats ¶
type ElementStats struct {
    InputAudioLatency float64
    InputVideoLatency float64
    InputLatency      []MediaLatencyStat
}
type Endpoint ¶
type Endpoint struct {
MediaElement
}
Base interface for all endpoints. An Endpoint is a `MediaElement` that allows Kurento to exchange media contents with external systems, supporting different transport protocols and mechanisms, such as RTP, WebRTC, HTTP(s), "file://" URLs, etc. An Endpoint may contain both sources and sinks for different media types, to provide bidirectional communication.
type EndpointStats ¶
type EndpointStats struct {
    AudioE2ELatency float64
    VideoE2ELatency float64
    E2ELatency      []MediaLatencyStat
}
type Filter ¶
type Filter struct {
MediaElement
}
Base interface for all filters. This is a certain type of `MediaElement` that processes media injected through its sinks, and delivers the outcome through its sources.
type FilterType ¶
type FilterType string
Type of filter to be created. Can take the values AUDIO, VIDEO or AUTODETECT.
const (
    FILTERTYPE_AUDIO      FilterType = "AUDIO"
    FILTERTYPE_AUTODETECT FilterType = "AUTODETECT"
    FILTERTYPE_VIDEO      FilterType = "VIDEO"
)
type GapsFixMethod ¶
type GapsFixMethod string
How to fix gaps when they are found in the recorded stream.

Gaps are typically caused by packet loss in the input streams, such as when an RTP or WebRTC media flow suffers from network congestion and some packets don't arrive at the media server.

Different ways of handling gaps have different tradeoffs:

- NONE: Do not fix gaps. Leave the stream as-is, and store it with any gaps that the stream might have. Some players are clever enough to adapt to this during playback, so that the gaps are reduced to a minimum and no problems are perceived by the user; other players are not so sophisticated, and will struggle trying to decode a file that contains gaps. For example, trying to play such a file directly with Chrome will cause lipsync issues (audio and video will fall out of sync). This is the best choice if you need consistent durations across multiple simultaneous recordings, or if you are going to post-process the recordings anyway (e.g. with an extra FFmpeg step).

  For example, assume a session length of 15 seconds: packets arrive correctly during the first 5 seconds, then there is a gap, then data arrives again for the last 5 seconds. Also, for simplicity, assume 1 frame per second. With no fix for gaps, the RecorderEndpoint will store each frame as-is, with these timestamps:

    frame 1  - 00:01
    frame 2  - 00:02
    frame 3  - 00:03
    frame 4  - 00:04
    frame 5  - 00:05
    frame 11 - 00:11
    frame 12 - 00:12
    frame 13 - 00:13
    frame 14 - 00:14
    frame 15 - 00:15

  Notice how frames 6 to 10 are missing, but the last 5 frames keep their original timestamps. The total length of the file is detected as 15 seconds by most players, although playback could stutter or hang during the missing section.

- GENPTS: Adjust timestamps to generate a smooth progression over all frames. This technique rewrites the timestamp of all frames, so that gaps are suppressed. It provides the best playback experience for recordings that need to be played as-is (i.e. they won't be post-processed). However, fixing timestamps might cause a change in the total duration of a file, so different recordings from the same session might end up with slightly different durations.

  In our example, the RecorderEndpoint will change all timestamps that follow a gap in the stream, and store each frame as follows:

    frame 1  - 00:01
    frame 2  - 00:02
    frame 3  - 00:03
    frame 4  - 00:04
    frame 5  - 00:05
    frame 11 - 00:06
    frame 12 - 00:07
    frame 13 - 00:08
    frame 14 - 00:09
    frame 15 - 00:10

  Notice how frames 6 to 10 are missing, and the last 5 frames have their timestamps corrected to provide a smooth increment over the previous ones. The total length of the file is detected as 10 seconds, and playback should be correct throughout the whole file.

- FILL_IF_TRANSCODING: (NOT IMPLEMENTED YET). This is a proposal for future improvement of the RecorderEndpoint. It is possible to perform a dynamic adaptation of the audio rate and add frame duplication to the video, such that the missing parts are filled with artificial data. This has the advantage of providing a smooth playback result, while at the same time conserving all original timestamps. However, the main issue with this method is that it requires access to the decoded media; i.e., transcoding must be active. For this reason, the proposal is to offer this option only when transcoding would happen anyway. In our example, the RecorderEndpoint would fill the missing frames like this:

    frame 1    - 00:01
    frame 2    - 00:02
    frame 3    - 00:03
    frame 4    - 00:04
    frame 5    - 00:05
    fake frame - 00:06
    fake frame - 00:07
    fake frame - 00:08
    fake frame - 00:09
    fake frame - 00:10
    frame 11   - 00:11
    frame 12   - 00:12
    frame 13   - 00:13
    frame 14   - 00:14
    frame 15   - 00:15

  This joins the best of both worlds: on one hand, playback should be smooth, and even the most basic players should be able to handle the recording files without issue; on the other, the total length of the file is left unmodified, so it matches the expected duration of the sessions being recorded.
const (
    GAPSFIXMETHOD_NONE                GapsFixMethod = "NONE"
    GAPSFIXMETHOD_GENPTS              GapsFixMethod = "GENPTS"
    GAPSFIXMETHOD_FILL_IF_TRANSCODING GapsFixMethod = "FILL_IF_TRANSCODING"
)
func (GapsFixMethod) String ¶
func (t GapsFixMethod) String() string
Implement fmt.Stringer interface
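The GENPTS renumbering described above can be illustrated with plain timestamp arithmetic. This is a standalone sketch of the idea, not RecorderEndpoint code: every frame after a gap is shifted back by the accumulated gap duration. The Frame type is hypothetical, for illustration only.

    // Frame is a toy stand-in for a recorded frame, with a presentation
    // timestamp in seconds.
    type Frame struct {
        PTS int
    }

    // fixGaps rewrites timestamps so that any gap larger than the nominal
    // frame interval is suppressed, mimicking what GENPTS does.
    func fixGaps(frames []Frame, interval int) []Frame {
        shift := 0
        out := make([]Frame, len(frames))
        for i, f := range frames {
            if i > 0 {
                if gap := f.PTS - frames[i-1].PTS; gap > interval {
                    shift += gap - interval // accumulate the suppressed gap
                }
            }
            out[i] = Frame{PTS: f.PTS - shift}
        }
        return out
    }

With the 15-frame example above and interval = 1, frames 11 to 15 come out with timestamps 00:06 to 00:10, matching the GENPTS listing.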
type GstreamerDotDetails ¶
type GstreamerDotDetails string
Details of gstreamer dot graphs
const (
    GSTREAMERDOTDETAILS_SHOW_MEDIA_TYPE         GstreamerDotDetails = "SHOW_MEDIA_TYPE"
    GSTREAMERDOTDETAILS_SHOW_CAPS_DETAILS       GstreamerDotDetails = "SHOW_CAPS_DETAILS"
    GSTREAMERDOTDETAILS_SHOW_NON_DEFAULT_PARAMS GstreamerDotDetails = "SHOW_NON_DEFAULT_PARAMS"
    GSTREAMERDOTDETAILS_SHOW_STATES             GstreamerDotDetails = "SHOW_STATES"
    GSTREAMERDOTDETAILS_SHOW_FULL_PARAMS        GstreamerDotDetails = "SHOW_FULL_PARAMS"
    GSTREAMERDOTDETAILS_SHOW_ALL                GstreamerDotDetails = "SHOW_ALL"
    GSTREAMERDOTDETAILS_SHOW_VERBOSE            GstreamerDotDetails = "SHOW_VERBOSE"
)
func (GstreamerDotDetails) String ¶
func (t GstreamerDotDetails) String() string
Implement fmt.Stringer interface
type HttpEndpoint ¶
type HttpEndpoint struct {
SessionEndpoint
}
Endpoint that enables Kurento to work as an HTTP server, allowing peer HTTP clients to access media.
func (*HttpEndpoint) GetUrl ¶
func (elem *HttpEndpoint) GetUrl() (string, error)
Obtains the URL associated with this endpoint. Returns: the URL as a string.
type HttpPostEndpoint ¶
type HttpPostEndpoint struct {
HttpEndpoint
}
An `HttpPostEndpoint` contains SINK pads for AUDIO and VIDEO, which provide access to an HTTP file upload function.
This type of endpoint provides unidirectional communication. Its `MediaSources <MediaSource>` are accessed through the HTTP POST method.
type Hub ¶
type Hub struct {
MediaObject
}
A Hub is a routing `MediaObject`. It connects several `endpoints <Endpoint>` together
func (*Hub) GetGstreamerDot ¶
func (elem *Hub) GetGstreamerDot(details GstreamerDotDetails) (string, error)
Returns a string in dot (Graphviz) format that represents the GStreamer elements inside the pipeline. Returns: the dot graph.
type HubPort ¶
type HubPort struct {
MediaElement
}
This `MediaElement` specifies a connection with a `Hub`
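Hubs and their ports are created like any other media object. A sketch of wiring one participant into a Composite grid follows; it assumes conn, pipeline and webrtc as in the Connection example, that an empty MediaType selects all media types, and that this binding forwards the Kurento HubPort constructor parameter through the options map under the key "hub" (an assumption not confirmed by this documentation):

    // Create the mixing hub inside the pipeline.
    composite := &kurento.Composite{}
    if err := pipeline.Create(composite, nil); err != nil {
        log.Fatal(err)
    }

    // Each participant talks to the hub through its own HubPort. The "hub"
    // option key mirrors the Kurento HubPort constructor parameter and is
    // an assumption about this binding.
    port := &kurento.HubPort{}
    if err := pipeline.Create(port, map[string]interface{}{"hub": composite}); err != nil {
        log.Fatal(err)
    }

    // Media in: participant endpoint -> port; media out: port -> endpoint.
    if err := webrtc.Connect(port, "", "", ""); err != nil {
        log.Fatal(err)
    }
    if err := port.Connect(webrtc, "", "", ""); err != nil {
        log.Fatal(err)
    }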
type IAlphaBlending ¶
type IBaseRtpEndpoint ¶
type IBaseRtpEndpoint interface { }
type IComposite ¶
type IComposite interface { }
type ICustomSerializer ¶
type ICustomSerializer interface {
CustomSerialize() map[string]interface{}
}
type IDispatcher ¶
type IDispatcherOneToMany ¶
type IHttpEndpoint ¶
type IHttpPostEndpoint ¶
type IHttpPostEndpoint interface { }
type IHub ¶
type IHub interface {
GetGstreamerDot(details GstreamerDotDetails) (string, error)
}
type IMediaElement ¶
type IMediaElement interface {
    GetSourceConnections(mediaType MediaType, description string) ([]ElementConnectionData, error)
    GetSinkConnections(mediaType MediaType, description string) ([]ElementConnectionData, error)
    Connect(sink IMediaElement, mediaType MediaType, sourceMediaDescription string, sinkMediaDescription string) error
    Disconnect(sink IMediaElement, mediaType MediaType, sourceMediaDescription string, sinkMediaDescription string) error
    SetAudioFormat(caps AudioCaps) error
    SetVideoFormat(caps VideoCaps) error
    GetGstreamerDot(details GstreamerDotDetails) (string, error)
    SetOutputBitrate(bitrate int) error
    GetStats(mediaType MediaType) (map[string]Stats, error)
    IsMediaFlowingIn(mediaType MediaType, sinkMediaDescription string) (bool, error)
    IsMediaFlowingOut(mediaType MediaType, sourceMediaDescription string) (bool, error)
    IsMediaTranscoding(mediaType MediaType, binName string) (bool, error)
}
type IMediaObject ¶
type IMediaObject interface {
    // Each media object should be able to create another object.
    // Those options are sent to getConstructorParams.
    Create(IMediaObject, map[string]interface{}) error
    Release() error
    // Implement Stringer
    String() string
    // contains filtered or unexported methods
}
IMediaObject defines basic methods that all media objects implement, such as getConstructorParams or Create().
type IMediaPipeline ¶
type IMediaPipeline interface {
GetGstreamerDot(details GstreamerDotDetails) (string, error)
}
type IPassThrough ¶
type IPassThrough interface { }
type IPlayerEndpoint ¶
type IPlayerEndpoint interface {
Play() error
}
type IRecorderEndpoint ¶
type IRtpEndpoint ¶
type IRtpEndpoint interface { }
type ISdpEndpoint ¶
type IServerManager ¶
type ISessionEndpoint ¶
type ISessionEndpoint interface { }
type IUriEndpoint ¶
type IWebRtcEndpoint ¶
type IceCandidate ¶
func (IceCandidate) CustomSerialize ¶
func (t IceCandidate) CustomSerialize() map[string]interface{}
type IceCandidatePair ¶
type IceComponentState ¶
type IceComponentState string
States of an ICE component.
const (
    ICECOMPONENTSTATE_DISCONNECTED IceComponentState = "DISCONNECTED"
    ICECOMPONENTSTATE_GATHERING    IceComponentState = "GATHERING"
    ICECOMPONENTSTATE_CONNECTING   IceComponentState = "CONNECTING"
    ICECOMPONENTSTATE_CONNECTED    IceComponentState = "CONNECTED"
    ICECOMPONENTSTATE_READY        IceComponentState = "READY"
    ICECOMPONENTSTATE_FAILED       IceComponentState = "FAILED"
)
func (IceComponentState) String ¶
func (t IceComponentState) String() string
Implement fmt.Stringer interface
type IceConnection ¶
type IceConnection struct {
    StreamId    string
    ComponentId int
    State       IceComponentState
}
type MediaElement ¶
type MediaElement struct {
    MediaObject

    // Minimum video bandwidth for transcoding.
    // Deprecated: due to a typo. Use MinOutputBitrate instead.
    MinOuputBitrate int

    // Minimum video bitrate for transcoding.
    //   - Unit: bps (bits per second).
    //   - Default: 0.
    MinOutputBitrate int

    // Maximum video bandwidth for transcoding.
    // Deprecated: due to a typo. Use MaxOutputBitrate instead.
    MaxOuputBitrate int

    // Maximum video bitrate for transcoding.
    //   - Unit: bps (bits per second).
    //   - Default: MAXINT.
    //   - 0 = unlimited.
    MaxOutputBitrate int
}
The basic building block of the media server, which can be interconnected inside a pipeline.

A `MediaElement` is a module that encapsulates a specific media capability, and that is able to exchange media with other MediaElements through an internal element called a pad.

A pad can be defined as an input or output interface. Input pads are called sinks, and are where media elements receive media from other media elements. Output interfaces are called sources, and are the pads a media element uses to feed media to other media elements. There can be only one sink pad per media element. On the other hand, the number of source pads is unconstrained. This means that a given media element can receive media from only one element at a time, while it can send media to many others. Pads are created on demand, when the connect method is invoked. When two media elements are connected, one media pad is created for each type of media connected. For example, if you connect AUDIO and VIDEO between two media elements, each one will need to create two new pads: one for AUDIO and one for VIDEO.

When media elements are connected, it can be the case that the encoding required by the input and output pads is not the same, and the media must be transcoded. This is handled transparently by the MediaElement internals, but such transcoding takes a toll in the form of higher CPU load, so connecting MediaElements that need media encoded in different formats should be considered a high-load operation. The `MediaTranscodingStateChanged` event informs the client application of whether media transcoding is being enabled inside any MediaElement object.
func (*MediaElement) Connect ¶
func (elem *MediaElement) Connect(sink IMediaElement, mediaType MediaType, sourceMediaDescription string, sinkMediaDescription string) error
Connects two elements, with the media flowing from left to right.

The element that invokes the connect will be the source of media, creating one source pad for each type of media connected. The element given as a parameter to the method will be the sink, and will create one sink pad per media type connected.

If not otherwise specified, all types of media are connected by default (AUDIO, VIDEO and DATA). It is recommended to connect only the specific types of media that will actually be used. For this purpose, the connect method can be invoked more than once on the same two elements, but with different media types.

The connection is unidirectional. If a bidirectional connection is desired, the positions of the media elements must be inverted. For instance, webrtc1.connect(webrtc2) connects webrtc1 as the source of webrtc2. In order to create a WebRTC one-to-one conversation, the user would also need to specify the connection in the other direction, with webrtc2.connect(webrtc1); see the sketch below.

Even though one media element can have one sink pad per type of media, only one media element can be connected to another at a given time. If a media element is connected to another, the former becomes the source of the sink media element, regardless of whether another element was previously connected.
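A minimal sketch of the one-to-one wiring just described, assuming two already-created *kurento.WebRtcEndpoint values and that an empty MediaType selects all media types (an assumption; pass an explicit type otherwise):

    // Each Connect call is unidirectional: the caller is the media source.
    if err := webrtc1.Connect(webrtc2, "", "", ""); err != nil {
        log.Fatal(err)
    }
    // Connect in the opposite direction for a bidirectional conversation.
    if err := webrtc2.Connect(webrtc1, "", "", ""); err != nil {
        log.Fatal(err)
    }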
func (*MediaElement) Disconnect ¶
func (elem *MediaElement) Disconnect(sink IMediaElement, mediaType MediaType, sourceMediaDescription string, sinkMediaDescription string) error
Disconnects two media elements. This will release the source pads of the source media element, and the sink pads of the sink media element.
func (*MediaElement) GetGstreamerDot ¶
func (elem *MediaElement) GetGstreamerDot(details GstreamerDotDetails) (string, error)
Returns a .dot file describing the topology of the media element. The element can be queried for certain types of data:

- SHOW_ALL: default value
- SHOW_CAPS_DETAILS
- SHOW_FULL_PARAMS
- SHOW_MEDIA_TYPE
- SHOW_NON_DEFAULT_PARAMS
- SHOW_STATES
- SHOW_VERBOSE

Returns: the dot graph.
func (*MediaElement) GetSinkConnections ¶
func (elem *MediaElement) GetSinkConnections(mediaType MediaType, description string) ([]ElementConnectionData, error)
Gets information about the source pads of this media element. Since source pads connect to other media elements' sinks, this is formally the sink of media from the element's perspective. Media can be filtered by type, or by the description given to the pad through which both elements are connected.
Returns: a list of connection information for elements that are receiving media from this element. The list will be empty if no sinks are found.
func (*MediaElement) GetSourceConnections ¶
func (elem *MediaElement) GetSourceConnections(mediaType MediaType, description string) ([]ElementConnectionData, error)
Gets information about the sink pads of this media element. Since sink pads are the interface through which a media element gets its media, whatever is connected to an element's sink pad is formally a source of media. Media can be filtered by type, or by the description given to the pad through which both elements are connected.
Returns: a list of connection information for elements that are sending media to this element. The list will be empty if no sources are found.
func (*MediaElement) GetStats ¶
func (elem *MediaElement) GetStats(mediaType MediaType) (map[string]Stats, error)
Gets the statistics related to an endpoint. If no media type is specified, it returns statistics for all available types. Returns: a successful result in the form of an RTC stats report; an RTC stats report represents a map between strings identifying the inspected objects (RTCStats.id) and their corresponding RTCStats objects.
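A sketch of pulling the video statistics for any element; the "VIDEO" media type string follows the MediaType documentation in this package, and the fmt/log imports are omitted:

    // Fetch video statistics and print each report keyed by RTCStats.id.
    func printVideoStats(elem *kurento.MediaElement) {
        stats, err := elem.GetStats("VIDEO")
        if err != nil {
            log.Fatal(err)
        }
        for id, s := range stats {
            fmt.Printf("stats %s: %+v\n", id, s)
        }
    }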
func (*MediaElement) IsMediaFlowingIn ¶
func (elem *MediaElement) IsMediaFlowingIn(mediaType MediaType, sinkMediaDescription string) (bool, error)
Indicates whether the media element is receiving media of a certain type. The media sink pad can be identified individually, if needed. Only AUDIO and VIDEO types are supported; a MEDIA_OBJECT_ILLEGAL_PARAM_ERROR is raised otherwise. If the indicated pad does not exist, it will return false. Returns: TRUE if there is media, FALSE otherwise.
func (*MediaElement) IsMediaFlowingOut ¶
func (elem *MediaElement) IsMediaFlowingOut(mediaType MediaType, sourceMediaDescription string) (bool, error)
Indicates whether the media element is emitting media of a certain type. The media source pad can be identified individually, if needed. Only AUDIO and VIDEO types are supported; a MEDIA_OBJECT_ILLEGAL_PARAM_ERROR is raised otherwise. If the indicated pad does not exist, it will return false. Returns: TRUE if there is media, FALSE otherwise.
func (*MediaElement) IsMediaTranscoding ¶
func (elem *MediaElement) IsMediaTranscoding(mediaType MediaType, binName string) (bool, error)
Indicates whether this media element is actively transcoding between input and output pads. This operation is only supported for AUDIO and VIDEO media types; a MEDIA_OBJECT_ILLEGAL_PARAM_ERROR is raised otherwise. The internal GStreamer processing bin can be indicated, if needed; if the bin doesn't exist, the return value will be FALSE. Returns: TRUE if media is being transcoded, FALSE otherwise.
func (*MediaElement) SetAudioFormat ¶
func (elem *MediaElement) SetAudioFormat(caps AudioCaps) error
Sets the type of data for the audio stream. MediaElements that do not support configuration of audio capabilities will throw a MEDIA_OBJECT_ILLEGAL_PARAM_ERROR exception. NOTE: this method is not yet implemented by the Media Server to do anything useful.
func (*MediaElement) SetOutputBitrate ¶
func (elem *MediaElement) SetOutputBitrate(bitrate int) error
Deprecated. Allows changing the target bitrate for the media output, if the media is encoded using VP8 or H264. This method only works if it is called before the media starts to flow.
func (*MediaElement) SetVideoFormat ¶
func (elem *MediaElement) SetVideoFormat(caps VideoCaps) error
Sets the type of data for the video stream. MediaElements that do not support configuration of video capabilities will throw a MEDIA_OBJECT_ILLEGAL_PARAM_ERROR exception. NOTE: this method is not yet implemented by the Media Server to do anything useful.
type MediaFlowState ¶
type MediaFlowState string
Flowing state of the media.
const (
    MEDIAFLOWSTATE_FLOWING     MediaFlowState = "FLOWING"
    MEDIAFLOWSTATE_NOT_FLOWING MediaFlowState = "NOT_FLOWING"
)
func (MediaFlowState) String ¶
func (t MediaFlowState) String() string
Implement fmt.Stringer interface
type MediaLatencyStat ¶
type MediaObject ¶
type MediaObject struct {
    // `MediaPipeline` to which this MediaObject belongs. It returns itself
    // when invoked for a pipeline object.
    MediaPipeline IMediaPipeline

    // Parent of this MediaObject. The parent of a `Hub` or a `MediaElement`
    // is its `MediaPipeline`. A `MediaPipeline` has no parent, so this
    // property will be null.
    Parent IMediaObject

    // Unique identifier of this MediaObject. It is a synthetic identifier
    // composed of a GUID and the MediaObject type. The ID is prefixed with
    // the parent ID when the object has a parent: ID_parent/ID_media-object.
    Id string

    // Children of this MediaObject.
    // Deprecated: use Children instead.
    Childs []IMediaObject

    // Children of this MediaObject.
    Children []IMediaObject

    // This MediaObject's name. This is just sugar to simplify developers'
    // lives when debugging; it is not used internally for indexing nor for
    // identifying the objects. By default, it is the object's ID.
    Name string

    // Flag activating or deactivating sending the element's tags in fired
    // events.
    SendTagsInEvents bool

    // MediaObject creation time, in seconds since the Epoch.
    CreationTime int
    // contains filtered or unexported fields
}
Base interface used to manage capabilities common to all Kurento elements.

Properties:

- id: unique identifier assigned to this MediaObject at instantiation time. `MediaPipeline` IDs are generated with a GUID followed by the suffix _kurento.MediaPipeline. `MediaElement` IDs are also a GUID, with the suffix _kurento.{ElementType}, prefixed by the parent's ID.
  - MediaPipeline ID example: 907cac3a-809a-4bbe-a93e-ae7e944c5cae_kurento.MediaPipeline
  - MediaElement ID example: 907cac3a-809a-4bbe-a93e-ae7e944c5cae_kurento.MediaPipeline/403da25a-805b-4cf1-8c55-f190588e6c9b_kurento.WebRtcEndpoint
- name: free text intended to provide a friendly name for this MediaObject. Its default value is the same as the ID.
- tags: key-value pairs intended for applications to associate metadata with this MediaObject instance.

Events:

- Error: reports asynchronous error events. It is recommended to always subscribe a listener to this event, as regular errors from the pipeline will be notified through it, instead of through an exception when invoking a method.
func (*MediaObject) AddTag ¶
func (elem *MediaObject) AddTag(key string, value string) error
Adds a new tag to this MediaObject. If the tag is already present, it changes the value.
func (*MediaObject) Create ¶
func (elem *MediaObject) Create(m IMediaObject, options map[string]interface{}) error
Create object "m" with given "options"
func (*MediaObject) GetTag ¶
func (elem *MediaObject) GetTag(key string) (string, error)
Returns the value of the given tag, or MEDIA_OBJECT_TAG_KEY_NOT_FOUND if the tag is not defined. Returns: the value associated with the given key.
func (*MediaObject) GetTags ¶
func (elem *MediaObject) GetTags() ([]Tag, error)
Returns all tags attached to this MediaObject. Returns: an array containing all key-value pairs associated with this MediaObject.
func (*MediaObject) Release ¶
func (elem *MediaObject) Release() error
func (*MediaObject) RemoveTag ¶
func (elem *MediaObject) RemoveTag(key string) error
Removes an existing tag. Exits silently, with no error, if the tag is not defined.
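A short sketch of the tag API on any media object (elem is any value embedding MediaObject; fmt/log imports omitted):

    // Attach application metadata to a media object and read it back.
    if err := elem.AddTag("room", "demo-42"); err != nil {
        log.Fatal(err)
    }
    room, err := elem.GetTag("room")
    if err != nil {
        log.Fatal(err) // e.g. MEDIA_OBJECT_TAG_KEY_NOT_FOUND if the key is absent
    }
    fmt.Println("room =", room)

    // List every key-value pair attached to the object.
    tags, err := elem.GetTags()
    if err != nil {
        log.Fatal(err)
    }
    for _, t := range tags {
        fmt.Printf("%+v\n", t)
    }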
func (*MediaObject) String ¶
func (m *MediaObject) String() string
String implements fmt.Stringer interface, return ID
type MediaPipeline ¶
type MediaPipeline struct {
    MediaObject

    // If statistics about pipeline latency are enabled for all media elements.
    LatencyStats bool
}
A pipeline is a container for a collection of `MediaElements<MediaElement>` and `MediaMixers<MediaMixer>`. It offers the methods needed to control the creation and connection of elements inside a certain pipeline.
func (*MediaPipeline) GetGstreamerDot ¶
func (elem *MediaPipeline) GetGstreamerDot(details GstreamerDotDetails) (string, error)
Returns a string in dot (Graphviz) format that represents the GStreamer elements inside the pipeline. Returns: the dot graph.
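A sketch of dumping the pipeline topology to a file for offline inspection with Graphviz (os import omitted):

    // Dump the pipeline topology and render it later with:
    //   dot -Tpng pipeline.dot -o pipeline.png
    func dumpPipelineDot(pipeline *kurento.MediaPipeline) error {
        dot, err := pipeline.GetGstreamerDot(kurento.GSTREAMERDOTDETAILS_SHOW_ALL)
        if err != nil {
            return err
        }
        return os.WriteFile("pipeline.dot", []byte(dot), 0o644)
    }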
type MediaProfileSpecType ¶
type MediaProfileSpecType string
Media profile, used by the RecorderEndpoint builder to specify the codecs and media container that should be used for the recordings.
const (
    MEDIAPROFILESPECTYPE_WEBM                   MediaProfileSpecType = "WEBM"
    MEDIAPROFILESPECTYPE_MKV                    MediaProfileSpecType = "MKV"
    MEDIAPROFILESPECTYPE_MP4                    MediaProfileSpecType = "MP4"
    MEDIAPROFILESPECTYPE_WEBM_VIDEO_ONLY        MediaProfileSpecType = "WEBM_VIDEO_ONLY"
    MEDIAPROFILESPECTYPE_WEBM_AUDIO_ONLY        MediaProfileSpecType = "WEBM_AUDIO_ONLY"
    MEDIAPROFILESPECTYPE_MKV_VIDEO_ONLY         MediaProfileSpecType = "MKV_VIDEO_ONLY"
    MEDIAPROFILESPECTYPE_MKV_AUDIO_ONLY         MediaProfileSpecType = "MKV_AUDIO_ONLY"
    MEDIAPROFILESPECTYPE_MP4_VIDEO_ONLY         MediaProfileSpecType = "MP4_VIDEO_ONLY"
    MEDIAPROFILESPECTYPE_MP4_AUDIO_ONLY         MediaProfileSpecType = "MP4_AUDIO_ONLY"
    MEDIAPROFILESPECTYPE_JPEG_VIDEO_ONLY        MediaProfileSpecType = "JPEG_VIDEO_ONLY"
    MEDIAPROFILESPECTYPE_KURENTO_SPLIT_RECORDER MediaProfileSpecType = "KURENTO_SPLIT_RECORDER"
    MEDIAPROFILESPECTYPE_FLV                    MediaProfileSpecType = "FLV"
)
func (MediaProfileSpecType) String ¶
func (t MediaProfileSpecType) String() string
Implement fmt.Stringer interface
type MediaState ¶
type MediaState string
State of the media.
const (
    MEDIASTATE_DISCONNECTED MediaState = "DISCONNECTED"
    MEDIASTATE_CONNECTED    MediaState = "CONNECTED"
)
type MediaTranscodingState ¶
type MediaTranscodingState string
Transcoding state for a media.
const (
    MEDIATRANSCODINGSTATE_TRANSCODING     MediaTranscodingState = "TRANSCODING"
    MEDIATRANSCODINGSTATE_NOT_TRANSCODING MediaTranscodingState = "NOT_TRANSCODING"
)
func (MediaTranscodingState) String ¶
func (t MediaTranscodingState) String() string
Implement fmt.Stringer interface
type MediaType ¶
type MediaType string
Type of media stream to be exchanged. Can take the values AUDIO, DATA or VIDEO.
type Mixer ¶
type Mixer struct {
Hub
}
A `Hub` that allows routing of video between arbitrary port pairs and mixing of audio among several ports
type ModuleInfo ¶
type OfferOptions ¶
type PassThrough ¶
type PassThrough struct {
MediaElement
}
A `MediaElement` that just passes media through
type PlayerEndpoint ¶
type PlayerEndpoint struct {
    UriEndpoint

    // Returns info about the source being played.
    VideoInfo *VideoInfo

    // Returns the GStreamer DOT string for this element's private pipeline.
    ElementGstreamerDot string

    // Get or set the actual position of the video, in ms.
    // Note: setting the position only works for seekable videos.
    Position int64
}
Retrieves content from external sources.

PlayerEndpoint will access the given resource, read all available data, and inject it into Kurento. Once this is done, the injected video or audio will be available for passing through any other Filter or Endpoint to which the PlayerEndpoint gets connected.

The source can provide either seekable or non-seekable media; this dictates whether the PlayerEndpoint is able to seek through the file, for example to jump to any given timestamp.

The Source URI supports these formats:

- File: a file path that will be read from the local file system. Example:
  - file:///path/to/file
- HTTP: any file available on an HTTP server. Examples:
  - http(s)://{server-ip}/path/to/file
  - http(s)://{username}:{password}@{server-ip}:{server-port}/path/to/file
- RTSP: typically used to capture a feed from an IP camera. Examples:
  - rtsp://{server-ip}
  - rtsp://{username}:{password}@{server-ip}:{server-port}/path/to/file
- NOTE (for current versions of Kurento 6.x): special characters are not supported in {username} or {password}. This means that {username} cannot contain colons (:), and {password} cannot contain 'at' signs (@). This is a limitation of GStreamer 1.8 (the underlying media framework behind Kurento), and is already fixed in newer versions (which the upcoming Kurento 7.x will use).
- NOTE (for upcoming Kurento 7.x): special characters in {username} or {password} must be URL-encoded. This means that colons (:) should be replaced with %3A, and 'at' signs (@) should be replaced with %40.

Note that PlayerEndpoint requires read permissions to the source; otherwise, the media server won't be able to retrieve any data, and an Error event will be fired. Make sure your application subscribes to this event, otherwise troubleshooting issues will be difficult.

The list of valid operations is:

- play: starts streaming media. If invoked after pause, it will resume playback.
- stop: stops streaming media. If play is invoked afterwards, the file will be streamed from the beginning.
- pause: pauses media streaming. Play must be invoked in order to resume playback.
- seek: if the source supports seeking to a different time position, then the PlayerEndpoint can:
  - setPosition: allows setting the position in the file.
  - getPosition: returns the current position being streamed.

Events fired:

- EndOfStreamEvent: when the file has been streamed completely.
func (*PlayerEndpoint) Play ¶
func (elem *PlayerEndpoint) Play() error
Starts reproducing the media, sending it to the `MediaSource`. If the endpoint has been connected to other endpoints, those will start receiving media.
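A sketch of playing a local file into an already-created WebRTC endpoint. It assumes pipeline and webrtc as in the earlier examples, that an empty MediaType selects all media types, and that the binding forwards the Kurento constructor parameter for the source URI through the options map under the key "uri" (an assumption):

    // Create the player with its source URI, wire it to the consumer, and
    // start playback. An EndOfStreamEvent fires when the file finishes.
    player := &kurento.PlayerEndpoint{}
    opts := map[string]interface{}{"uri": "file:///var/lib/kurento/sample.mp4"}
    if err := pipeline.Create(player, opts); err != nil {
        log.Fatal(err)
    }
    if err := player.Connect(webrtc, "", "", ""); err != nil {
        log.Fatal(err)
    }
    if err := player.Play(); err != nil {
        log.Fatal(err)
    }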
type RTCCertificateStats ¶
type RTCDataChannelState ¶
type RTCDataChannelState string
Represents the state of the RTCDataChannel
const (
    RTCDATACHANNELSTATE_connecting RTCDataChannelState = "connecting"
    RTCDATACHANNELSTATE_open       RTCDataChannelState = "open"
    RTCDATACHANNELSTATE_closing    RTCDataChannelState = "closing"
    RTCDATACHANNELSTATE_closed     RTCDataChannelState = "closed"
)
func (RTCDataChannelState) String ¶
func (t RTCDataChannelState) String() string
Implement fmt.Stringer interface
type RTCDataChannelStats ¶
type RTCIceCandidatePairStats ¶
type RTCIceCandidatePairStats struct {
    TransportId              string
    LocalCandidateId         string
    RemoteCandidateId        string
    State                    RTCStatsIceCandidatePairState
    Priority                 int64
    Nominated                bool
    Writable                 bool
    Readable                 bool
    BytesSent                int64
    BytesReceived            int64
    RoundTripTime            float64
    AvailableOutgoingBitrate float64
    AvailableIncomingBitrate float64
}
type RTCMediaStreamStats ¶
type RTCMediaStreamTrackStats ¶
type RTCMediaStreamTrackStats struct {
    TrackIdentifier           string
    RemoteSource              bool
    SsrcIds                   []string
    FrameWidth                int64
    FrameHeight               int64
    FramesPerSecond           float64
    FramesSent                int64
    FramesReceived            int64
    FramesDecoded             int64
    FramesDropped             int64
    FramesCorrupted           int64
    AudioLevel                float64
    EchoReturnLoss            float64
    EchoReturnLossEnhancement float64
}
type RTCPeerConnectionStats ¶
type RTCRTPStreamStats ¶
type RTCStatsIceCandidatePairState ¶
type RTCStatsIceCandidatePairState string
Represents the state of the checklist for the local and remote candidates in a pair.
const (
    RTCSTATSICECANDIDATEPAIRSTATE_frozen     RTCStatsIceCandidatePairState = "frozen"
    RTCSTATSICECANDIDATEPAIRSTATE_waiting    RTCStatsIceCandidatePairState = "waiting"
    RTCSTATSICECANDIDATEPAIRSTATE_inprogress RTCStatsIceCandidatePairState = "inprogress"
    RTCSTATSICECANDIDATEPAIRSTATE_failed     RTCStatsIceCandidatePairState = "failed"
    RTCSTATSICECANDIDATEPAIRSTATE_succeeded  RTCStatsIceCandidatePairState = "succeeded"
    RTCSTATSICECANDIDATEPAIRSTATE_cancelled  RTCStatsIceCandidatePairState = "cancelled"
)
func (RTCStatsIceCandidatePairState) String ¶
func (t RTCStatsIceCandidatePairState) String() string
Implement fmt.Stringer interface
type RTCStatsIceCandidateType ¶
type RTCStatsIceCandidateType string
Types of candidates
const (
    RTCSTATSICECANDIDATETYPE_host            RTCStatsIceCandidateType = "host"
    RTCSTATSICECANDIDATETYPE_serverreflexive RTCStatsIceCandidateType = "serverreflexive"
    RTCSTATSICECANDIDATETYPE_peerreflexive   RTCStatsIceCandidateType = "peerreflexive"
    RTCSTATSICECANDIDATETYPE_relayed         RTCStatsIceCandidateType = "relayed"
)
func (RTCStatsIceCandidateType) String ¶
func (t RTCStatsIceCandidateType) String() string
Implement fmt.Stringer interface
type RTCTransportStats ¶
type RecorderEndpoint ¶
type RecorderEndpoint struct {
UriEndpoint
}
Provides functionality to store media contents.

RecorderEndpoint can store media into local files or send it to a remote network storage. When another `MediaElement` is connected to a RecorderEndpoint, the media coming from the former will be muxed into the selected recording format and stored in the designated location.

These parameters must be provided to create a RecorderEndpoint, and they cannot be changed afterwards:

- Destination URI, where media will be stored. These formats are supported:
  - File: a path that will be written into the local file system. Example: file:///path/to/file
  - HTTP: a POST request will be made against a remote server. The server must support the chunked transfer encoding (HTTP header Transfer-Encoding: chunked). Examples: http(s)://{server-ip}/path/to/file, http(s)://{username}:{password}@{server-ip}:{server-port}/path/to/file
  - Relative URIs (with no schema) are also supported. They are completed by prepending a default URI defined by the property defaultPath. This property is defined in the configuration file /etc/kurento/modules/kurento/UriEndpoint.conf.ini, and its default value is file:///var/lib/kurento/
  - NOTE (for current versions of Kurento 6.x): special characters are not supported in {username} or {password}. This means that {username} cannot contain colons (:), and {password} cannot contain 'at' signs (@). This is a limitation of GStreamer 1.8 (the underlying media framework behind Kurento), and is already fixed in newer versions (which the upcoming Kurento 7.x will use).
  - NOTE (for upcoming Kurento 7.x): special characters in {username} or {password} must be URL-encoded. This means that colons (:) should be replaced with '%3A', and 'at' signs (@) should be replaced with '%40'.
- Media Profile (MediaProfileSpecType), which determines the video and audio encoding. See below for more details.
- EndOfStream (optional), a parameter that dictates whether the recording should be automatically stopped once the EOS event is detected.

Note that RecorderEndpoint requires write permissions on the destination; otherwise, the media server won't be able to store any information, and an Error event will be fired. Make sure your application subscribes to this event, otherwise troubleshooting issues will be difficult.

- To write local files (if you use file://), the system user that owns the media server process needs to have write permissions for the requested path. By default, this user is named 'kurento'.
- To record through HTTP, the remote server must be accessible through the network, and must grant write permissions for the destination path.

Recording starts as soon as the user invokes the Record method. The recorder then stores, in the indicated location, the media that the source is sending to the endpoint. If no media is being received, or no endpoint has been connected, the destination will be empty. The recorder starts writing media to the file as soon as it arrives.

Recording must be stopped when no more data should be stored. This is done with the StopAndWait method, which blocks and returns only after all the information has been stored correctly.

The source endpoint can be hot-swapped while the recording is taking place; the recorded file will then contain the different feeds. When switching video sources, if the new video has a different size, the recorder keeps the size of the previous source. If the source is disconnected, the last frame recorded is shown for the duration of the disconnection, or until the recording is stopped.

NOTE: It is recommended to start recording only after media arrives. For this, you may use the MediaFlowInStateChanged and MediaFlowOutStateChanged events of your endpoints, and synchronize the recording with the moment media comes into the recorder.

WARNING: All connected media types must be flowing to the RecorderEndpoint. The default Connect method assumes both AUDIO and VIDEO; failing to provide both kinds of media results in the RecorderEndpoint creating an empty file and buffering indefinitely, because the recorder waits until all kinds of media start arriving, in order to synchronize them appropriately. For audio-only or video-only recordings, make sure to use the correct, media-specific variant of the Connect method.

For example (see the sketch after this list):

1. When a web browser's video arrives to Kurento via WebRTC, your WebRtcEndpoint will emit a MediaFlowOutStateChanged event.
2. When video starts flowing from the WebRtcEndpoint to the RecorderEndpoint, the RecorderEndpoint will emit a MediaFlowInStateChanged event. You should start recording at this point.
3. Only start recording once the RecorderEndpoint has notified a MediaFlowInStateChanged event for ALL streams. So, if you record AUDIO+VIDEO, your application must receive one MediaFlowInStateChanged event for audio and another one for video.
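Below is a minimal sketch of this pattern. It assumes a NewConnection constructor, an event-handler signature of func(map[string]interface{}), and "uri" as the recorder creation option key; all three are assumptions about this package rather than confirmed API, so adjust to the real signatures:

    package main

    import (
        "log"

        kurento "example.com/kurento" // placeholder path; use this package's real import path
    )

    func main() {
        // Assumption: a NewConnection constructor taking the KMS websocket URL.
        conn := kurento.NewConnection("ws://localhost:8888/kurento")
        defer conn.Close()

        pipeline := &kurento.MediaPipeline{}
        if err := conn.Create(pipeline, nil); err != nil {
            log.Fatal(err)
        }

        webRtc := &kurento.WebRtcEndpoint{}
        if err := pipeline.Create(webRtc, nil); err != nil {
            log.Fatal(err)
        }

        // Destination URI and media profile are fixed at creation time.
        // The "uri" option key is an assumption; the 'kurento' user needs
        // write permission on the target directory.
        recorder := &kurento.RecorderEndpoint{}
        if err := pipeline.Create(recorder, map[string]interface{}{
            "uri": "file:///var/lib/kurento/recording.webm",
        }); err != nil {
            log.Fatal(err)
        }

        // Start recording only once BOTH kinds of media are flowing in.
        // Assumption: handlers receive a map[string]interface{} payload.
        flowing := map[string]bool{}
        recorder.Subscribe("MediaFlowInStateChanged", func(ev map[string]interface{}) {
            if ev["state"] == "FLOWING" {
                mt, _ := ev["mediaType"].(string)
                flowing[mt] = true
            }
            if flowing["AUDIO"] && flowing["VIDEO"] {
                if err := recorder.Record(); err != nil {
                    log.Println("record:", err)
                }
            }
        })

        // ... connect webRtc to recorder (the default Connect assumes AUDIO+VIDEO),
        // run the WebRTC signaling, and later stop with recorder.StopAndWait().
    }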
func (*RecorderEndpoint) Record ¶
func (elem *RecorderEndpoint) Record() error
Starts storing media received through the sink pad.
func (*RecorderEndpoint) StopAndWait ¶
func (elem *RecorderEndpoint) StopAndWait() error
Stops recording, and does not return until all the content has been written to the selected URI. This can cause timeouts in some clients if there is too much content to write, or the transport is slow.
type RembParams ¶
type RtpEndpoint ¶
type RtpEndpoint struct {
BaseRtpEndpoint
}
Endpoint that provides bidirectional content delivery capabilities with remote networked peers, through the RTP or SRTP protocol. An `RtpEndpoint` contains paired sink and source `MediaPad` for audio and video. This endpoint inherits from `BaseRtpEndpoint`.

In order to establish an RTP/SRTP communication, peers engage in an SDP negotiation process, where one of the peers (the offerer) sends an offer, while the other peer (the offeree) responds with an answer. This endpoint can function in both situations:

- As offerer: the negotiation process is initiated by the media server (sketched below).
  - KMS generates the SDP offer through the GenerateOffer method. This offer must then be sent to the remote peer (the offeree) through the signaling channel, for processing.
  - The remote peer processes the offer, and generates an answer, which is sent back to the media server.
  - Upon receiving the answer, the endpoint must invoke the ProcessAnswer method.
- As offeree: the negotiation process is initiated by the remote peer.
  - The remote peer, acting as offerer, generates an SDP offer and sends it to the RTP endpoint in Kurento.
  - The endpoint processes the offer with the ProcessOffer method. The result of this method is a string containing an SDP answer.
  - The SDP answer must be sent back to the offerer, so it can be processed.

In the case of unidirectional connections (i.e. only one peer is going to send media), the process is simpler, as only the emitter needs to process an SDP. On top of the information about media codecs and types, the SDP must contain the IP address of the remote peer, and the port where it will be listening. This way, the SDP can be mangled without needing to go through the exchange process, as the receiving peer does not need to process any answer.

The user can set bandwidth limits that will be used during the negotiation process. The default bandwidth range of the endpoint is 100 kbps - 500 kbps, but it can be changed separately for input/output directions and for audio/video streams:

- Input bandwidth control: a configuration interval used to inform the remote peer of the range of bitrates that can be pushed into this RtpEndpoint object. These values are announced in the SDP.
  - setMaxVideoRecvBandwidth: sets the maximum bitrate expected for the received video stream.
  - setMaxAudioRecvBandwidth: sets the maximum bitrate expected for the received audio stream.
- Output bandwidth control: a configuration interval used to control the bitrate of the output video stream sent to the remote peer. Remote peers can also announce bandwidth limitations in their SDPs (through the b={modifier}:{value} attribute). Kurento will always enforce bitrate limitations specified by the remote peer over internal configurations.
  - setMaxVideoSendBandwidth: sets the maximum bitrate for video sent to the remote peer.
  - setMinVideoSendBandwidth: sets the minimum bitrate for video sent to the remote peer.

All bandwidth control parameters must be set before the SDP negotiation takes place, and can't be modified afterwards. TODO: What happens if the b=AS attribute from the SDP has a lower value than the one set in setMinVideoSendBandwidth?

Take into consideration that setting too high an upper limit for the output bandwidth can flood the local network connection.
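A minimal sketch of the offerer role follows (pipeline is a created MediaPipeline as in the RecorderEndpoint sketch; the signaling helpers are hypothetical placeholders, and passing a zero-valued OfferOptions is an assumption):

    // Offerer sketch: the media server initiates the RTP negotiation.
    rtp := &kurento.RtpEndpoint{}
    if err := pipeline.Create(rtp, nil); err != nil {
        log.Fatal(err)
    }

    // 1. Generate the SDP offer (GenerateOffer is inherited via BaseRtpEndpoint).
    offer, err := rtp.GenerateOffer(kurento.OfferOptions{})
    if err != nil {
        log.Fatal(err)
    }
    sendToRemotePeer(offer) // hypothetical signaling helper

    // 2. Receive the remote answer and feed it back into the endpoint.
    answer := waitForRemoteAnswer() // hypothetical signaling helper
    if _, err := rtp.ProcessAnswer(answer); err != nil {
        log.Fatal(err)
    }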
type SDES ¶
type SDES struct {
	Key       string
	KeyBase64 string
	Crypto    CryptoSuite
}
type SdpEndpoint ¶
type SdpEndpoint struct {
	SessionEndpoint

	// Maximum input bitrate, signaled in SDP Offers to WebRTC and RTP senders.
	// This is used to put a limit on the bitrate that the remote peer will
	// send to this endpoint. The net effect of setting this parameter is
	// that, when Kurento generates an SDP Offer, an 'Application Specific'
	// (AS) maximum bandwidth attribute will be added to the SDP media
	// section: b=AS:{value}.
	// Note: this parameter has to be set before the SDP is generated.
	//   - Unit: kbps (kilobits per second).
	//   - Default: 0 (unlimited).
	MaxAudioRecvBandwidth int

	// Maximum input bitrate, signaled in SDP Offers to WebRTC and RTP
	// senders. Same semantics as MaxAudioRecvBandwidth, applied to the
	// video stream.
	MaxVideoRecvBandwidth int
}
Interface implemented by Endpoints that require an SDP Offer/Answer negotiation in order to configure a media session.

Functionality provided by this API:

- Generate SDP offers.
- Process SDP offers.
- Configure SDP-related parameters.
func (*SdpEndpoint) GenerateOffer ¶
func (elem *SdpEndpoint) GenerateOffer(options OfferOptions) (string, error)
Generates an SDP offer with the media capabilities of the endpoint. Throws:

- SDP_END_POINT_ALREADY_NEGOTIATED if the endpoint is already negotiated.
- SDP_END_POINT_GENERATE_OFFER_ERROR if the generated offer is empty. This is most likely due to an internal error.

Returns: The SDP offer.
func (*SdpEndpoint) GetLocalSessionDescriptor ¶
func (elem *SdpEndpoint) GetLocalSessionDescriptor() (string, error)
Returns the local SDP.

- No offer has been generated yet: returns an empty string.
- Offer has been generated: returns the SDP offer.
- Offer has been generated and an answer has been processed: returns the agreed SDP.

Returns: The last agreed SessionSpec.
func (*SdpEndpoint) GetRemoteSessionDescriptor ¶
func (elem *SdpEndpoint) GetRemoteSessionDescriptor() (string, error)
Returns the remote SDP. If the negotiation process is not complete, an empty string is returned.

Returns: The last agreed User Agent session description.
func (*SdpEndpoint) ProcessAnswer ¶
func (elem *SdpEndpoint) ProcessAnswer(answer string) (string, error)
Processes the SDP answer sent by this endpoint's remote peer, completing a negotiation that was started with GenerateOffer. Throws:

- SDP_PARSE_ERROR if the answer is empty or has errors.
- SDP_END_POINT_ALREADY_NEGOTIATED if the endpoint is already negotiated.
- SDP_END_POINT_PROCESS_ANSWER_ERROR if the result of processing the answer is an empty string. This is most likely due to an internal error.
- SDP_END_POINT_NOT_OFFER_GENERATED if the method is invoked before GenerateOffer.

Returns: Updated SDP offer, based on the answer received.
func (*SdpEndpoint) ProcessOffer ¶
func (elem *SdpEndpoint) ProcessOffer(offer string) (string, error)
Processes the SDP offer of the remote peer, and generates an SDP answer based on the endpoint's capabilities. If no matching capabilities are found, the SDP answer will contain no codecs. Throws:

- SDP_PARSE_ERROR if the offer is empty or has errors.
- SDP_END_POINT_ALREADY_NEGOTIATED if the endpoint is already negotiated.
- SDP_END_POINT_PROCESS_OFFER_ERROR if the generated answer is empty. This is most likely due to an internal error.

Returns: The chosen configuration from the ones stated in the SDP offer.
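Putting the SdpEndpoint methods together, a sketch of the offeree flow (endpoint stands for any SdpEndpoint-derived element; the signaling helpers are hypothetical):

    // Offeree sketch: the remote peer initiates the negotiation.
    remoteOffer := waitForRemoteOffer() // hypothetical signaling helper

    answer, err := endpoint.ProcessOffer(remoteOffer)
    if err != nil {
        // e.g. SDP_PARSE_ERROR or SDP_END_POINT_ALREADY_NEGOTIATED
        log.Fatal(err)
    }
    sendToRemotePeer(answer) // hypothetical signaling helper

    // After negotiation, both session descriptors are available.
    local, _ := endpoint.GetLocalSessionDescriptor()
    remote, _ := endpoint.GetRemoteSessionDescriptor()
    log.Printf("local: %d bytes, remote: %d bytes", len(local), len(remote))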
type ServerInfo ¶
type ServerInfo struct {
	Version      string
	Modules      []ModuleInfo
	Type         ServerType
	Capabilities []string
}
type ServerManager ¶
type ServerManager struct {
	MediaObject

	// Server information: version, modules, factories, etc.
	Info *ServerInfo

	// All the pipelines available in the server.
	Pipelines []IMediaPipeline

	// All active sessions in the server.
	Sessions []string

	// Metadata stored in the server.
	Metadata string
}
This is a standalone object for managing the media server.
func (*ServerManager) GetCpuCount ¶
func (elem *ServerManager) GetCpuCount() (int, error)
Number of CPU cores that the media server can use.

Linux processes can be configured to use only a subset of the cores that are available in the system, via the process affinity settings (sched_setaffinity(2)). With this method it is possible to know the number of cores that the media server can use on the machine where it is running. For example, it's possible to limit the core affinity inside a Docker container by running with a command such as docker run --cpuset-cpus='0,1'.

Note that the return value represents the number of logical processing units available, i.e. CPU cores including Hyper-Threading.

Returns: Number of CPU cores available for the media server.
func (*ServerManager) GetKmd ¶
func (elem *ServerManager) GetKmd(moduleName string) (string, error)
Returns the kmd file associated with a module.

Returns: The kmd file.
func (*ServerManager) GetUsedCpu ¶
func (elem *ServerManager) GetUsedCpu(interval int) (float64, error)
Average CPU usage of the server.

This method measures the average CPU usage of the media server during the requested interval. Normally you will want to choose an interval between 1000 and 10000 ms.

The returned value represents the global system CPU usage of the media server, as an average across all processing units (CPU cores).

Returns: CPU usage %.
func (*ServerManager) GetUsedMemory ¶
func (elem *ServerManager) GetUsedMemory() (int64, error)
Returns the amount of memory that the server is using, in KiB.

Returns: Used memory, in KiB.
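A short sketch polling these metrics, assuming you already hold a hydrated *ServerManager (how you obtain it depends on your connection setup):

    // Poll basic health metrics from the media server.
    cores, err := server.GetCpuCount()
    if err != nil {
        log.Fatal(err)
    }
    cpu, err := server.GetUsedCpu(2000) // average over a 2000 ms interval
    if err != nil {
        log.Fatal(err)
    }
    mem, err := server.GetUsedMemory()
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("cores=%d cpu=%.1f%% mem=%d KiB", cores, cpu, mem)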
type ServerType ¶
type ServerType string
Indicates whether the server is a real media server or a proxy.
const (
	SERVERTYPE_KMS ServerType = "KMS"
	SERVERTYPE_KCS ServerType = "KCS"
)
type SessionEndpoint ¶
type SessionEndpoint struct {
Endpoint
}
This interface is implemented by all networked endpoints that need to manage connection sessions with remote peers.
type StatsType ¶
type StatsType string
The type of a Stats object.
const (
	STATSTYPE_inboundrtp      StatsType = "inboundrtp"
	STATSTYPE_outboundrtp     StatsType = "outboundrtp"
	STATSTYPE_session         StatsType = "session"
	STATSTYPE_datachannel     StatsType = "datachannel"
	STATSTYPE_track           StatsType = "track"
	STATSTYPE_transport       StatsType = "transport"
	STATSTYPE_candidatepair   StatsType = "candidatepair"
	STATSTYPE_localcandidate  StatsType = "localcandidate"
	STATSTYPE_remotecandidate StatsType = "remotecandidate"
	STATSTYPE_element         StatsType = "element"
	STATSTYPE_endpoint        StatsType = "endpoint"
)
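These constants can be used, for instance, to filter the map returned by MediaElement.GetStats. The sketch below assumes the Stats value exposes its StatsType in a Type field, and that the MediaType constant is named MEDIATYPE_VIDEO; both are assumptions about this package:

    // Inspect element stats by type (field and constant names are assumptions).
    stats, err := element.GetStats(kurento.MEDIATYPE_VIDEO)
    if err != nil {
        log.Fatal(err)
    }
    for id, s := range stats {
        if s.Type == kurento.STATSTYPE_element {
            log.Println("element stats:", id)
        }
    }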
type UriEndpoint ¶
type UriEndpoint struct {
	Endpoint

	// The URI for this endpoint.
	Uri string

	// State of the endpoint.
	State *UriEndpointState
}
Interface for endpoints that require a URI to work. An example of this would be a `PlayerEndpoint`, whose URI property is used to locate the file to stream.
type UriEndpointState ¶
type UriEndpointState string
State of the endpoint
const (
	URIENDPOINTSTATE_STOP  UriEndpointState = "STOP"
	URIENDPOINTSTATE_START UriEndpointState = "START"
	URIENDPOINTSTATE_PAUSE UriEndpointState = "PAUSE"
)
func (UriEndpointState) String ¶
func (t UriEndpointState) String() string
Implements the fmt.Stringer interface.
type VideoCaps ¶
type VideoCaps struct {
	Codec     VideoCodec
	Framerate Fraction
}
type VideoCodec ¶
type VideoCodec string
Codec used for transmission of video.
const (
	VIDEOCODEC_VP8  VideoCodec = "VP8"
	VIDEOCODEC_H264 VideoCodec = "H264"
	VIDEOCODEC_RAW  VideoCodec = "RAW"
)
type WebRtcEndpoint ¶
type WebRtcEndpoint struct {
	BaseRtpEndpoint

	// Local network interfaces used for ICE gathering.
	//
	// If you know which network interfaces should be used to perform ICE (for
	// WebRTC connectivity), you can define them here. Doing so has several
	// advantages:
	//   - The WebRTC ICE gathering process will be much quicker. Normally, it
	//     needs to gather local candidates for all of the network interfaces,
	//     but this step can be made faster if you limit it to only the
	//     interface that you know will work.
	//   - It will ensure that the media server always decides to use the
	//     correct network interface. With WebRTC ICE gathering it's possible
	//     that, under some circumstances (in systems with virtual network
	//     interfaces such as docker0), the ICE process ends up choosing the
	//     wrong local IP.
	//
	// NetworkInterfaces is a comma-separated list of network interface names.
	// Examples: "eth0", "eth0,enp0s25".
	NetworkInterfaces string

	// Enable ICE-TCP candidate gathering.
	//
	// This setting enables or disables using TCP for ICE candidate gathering
	// in the underlying libnice library:
	// https://libnice.freedesktop.org/libnice/NiceAgent.html#NiceAgent--ice-tcp
	//
	// You might want to disable ICE-TCP to potentially speed up ICE gathering
	// by avoiding TCP candidates in scenarios where they are not needed.
	//
	// IceTcp is either true (ON) or false (OFF). Default: true (ON).
	IceTcp bool

	// STUN server IP address.
	//
	// The ICE process uses STUN to punch holes through NAT firewalls.
	// StunServerAddress MUST be an IP address; domain names are NOT supported.
	//
	// You need to use a well-working STUN server. Use this to check if it works:
	// https://webrtc.github.io/samples/src/content/peerconnection/trickle-ice/
	// From that check, you should get at least one Server-Reflexive Candidate
	// (type srflx).
	StunServerAddress string

	// Port of the STUN server.
	StunServerPort int

	// TURN server URL.
	//
	// When STUN is not enough to open connections through some NAT firewalls,
	// using TURN is the remaining alternative. Note that TURN is a superset of
	// STUN, so you don't need to configure STUN if you are using TURN.
	//
	// The provided URL should follow one of these formats:
	//   - user:password@ipaddress:port
	//   - user:password@ipaddress:port?transport=[udp|tcp|tls]
	// ipaddress MUST be an IP address; domain names are NOT supported.
	// transport is OPTIONAL. Possible values: udp, tcp, tls. Default: udp.
	//
	// You need to use a well-working TURN server. Use this to check if it works:
	// https://webrtc.github.io/samples/src/content/peerconnection/trickle-ice/
	// From that check, you should get at least one Server-Reflexive Candidate
	// (type srflx) AND one Relay Candidate (type relay).
	TurnUrl string

	// External IPv4 address of the media server.
	//
	// Forces all local IPv4 ICE candidates to have the given address. This is
	// really nothing more than a hack, but it's very effective to force a
	// public IP address when one is known in advance for the media server. In
	// doing so, KMS will not need a STUN or TURN server, but remote peers will
	// still be able to contact it.
	//
	// You can try using this setting if KMS is deployed on a publicly
	// accessible server, without NAT, and with a static public IP address. But
	// if it doesn't work for you, just go back to configuring a STUN or TURN
	// server for ICE.
	//
	// Only set this parameter if you know what you're doing, and you
	// understand 100% WHY you need it. For the majority of cases, you should
	// just prefer to configure a STUN or TURN server.
	//
	// ExternalIPv4 is a single IPv4 address. Example: "198.51.100.1".
	ExternalIPv4 string

	// External IPv6 address of the media server.
	//
	// Same semantics and caveats as ExternalIPv4, applied to IPv6 candidates.
	// ExternalIPv6 is a single IPv6 address.
	// Example: "2001:0db8:85a3:0000:0000:8a2e:0370:7334".
	ExternalIPv6 string

	// External IP address of the media server.
	//
	// Forces all local IPv4 and IPv6 ICE candidates to have the given address.
	// Same semantics and caveats as ExternalIPv4/ExternalIPv6.
	// ExternalAddress is a single IPv4 or IPv6 address.
	//
	// Deprecated: Use ExternalIPv4 and/or ExternalIPv6 instead.
	ExternalAddress string

	// The ICE candidate pair (local and remote candidates) used by the ICE
	// library for each stream.
	ICECandidatePairs []*IceCandidatePair

	// The ICE connection state for all the connections.
	IceConnectionState []*IceConnection
}
Control interface for the Kurento WebRTC endpoint.

This endpoint is one side of a peer-to-peer WebRTC communication; the other peer can be a WebRTC-capable browser (using the RTCPeerConnection API), a native WebRTC app, or even another Kurento Media Server.

In order to establish a WebRTC communication, peers engage in an SDP negotiation process, where one of the peers (the offerer) sends an offer, while the other peer (the offeree) responds with an answer. This endpoint can function in both situations:

- As offerer: the negotiation process is initiated by the media server.
  - KMS generates the SDP offer through the GenerateOffer method. This offer must then be sent to the remote peer (the offeree) through the signaling channel, for processing.
  - The remote peer processes the offer, and generates an answer, which is sent back to the media server.
  - Upon receiving the answer, the endpoint must invoke the ProcessAnswer method.
- As offeree: the negotiation process is initiated by the remote peer.
  - The remote peer, acting as offerer, generates an SDP offer and sends it to the WebRTC endpoint in Kurento.
  - The endpoint processes the offer with the ProcessOffer method. The result of this method is a string containing an SDP answer.
  - The SDP answer must be sent back to the offerer, so it can be processed.

SDPs are sent without ICE candidates, following the Trickle ICE optimization. Once the SDP negotiation is completed, both peers proceed with the ICE discovery process, intended to set up a bidirectional media connection. During this process, each peer:

- Discovers ICE candidates for itself, containing pairs of IPs and ports.
- Sends its ICE candidates, as they are discovered, via the signaling channel to the remote peer for probing.
- Runs ICE connectivity checks as soon as a new candidate description from the remote peer is available.

Once a suitable pair of candidates (one for each peer) is discovered, the media session can start. The harvesting process in Kurento begins with the invocation of the GatherCandidates method. Since the whole purpose of Trickle ICE is to speed up connectivity, candidates are generated asynchronously; therefore, in order to capture the candidates, the user must subscribe to the IceCandidateFound event. It is important that the event listener is bound before invoking GatherCandidates, otherwise a suitable candidate might be lost, and the connection might not be established (see the sketch after this description).

It's important to keep in mind, when designing interactions between different MediaElements, that the WebRTC connection is an asynchronous process. For example, it would be pointless to start recording before media is flowing. In order to be notified of state changes, the application can subscribe to the events generated by the WebRtcEndpoint. Following is a full list of events generated by WebRtcEndpoint:

- IceComponentStateChange: informs only about changes in the ICE connection state. Possible values are:
  - DISCONNECTED: no activity scheduled.
  - GATHERING: gathering local candidates.
  - CONNECTING: establishing connectivity.
  - CONNECTED: at least one working candidate pair.
  - READY: ICE concluded, candidate pair selection is now final.
  - FAILED: connectivity checks have been completed, but a media connection was not established.
  The transitions between states are covered in RFC 5245. This event is network-only: it only takes into account the state of the network connection, ignoring higher-level matters such as the DTLS handshake or the RTCP flow. This implies that, while the component state is CONNECTED, there might be no media flowing between the peers, which makes this event useful only for low-level information about the connection. Moreover, while other events might leave a grace period before firing, this event fires immediately after the state change is detected.
- IceCandidateFound: raised when a new candidate is discovered. ICE candidates must be sent to the remote peer of the connection; failing to do so for some or all of the candidates might render the connection unusable.
- IceGatheringDone: raised when the ICE gathering process is completed, meaning that all candidates have already been discovered.
- NewCandidatePairSelected: raised when a new ICE candidate pair gets selected. The pair contains both the local and remote candidates being used for a component. This event can be raised during a media session, if a new pair of candidates with higher priority is found.
- DataChannelOpen: raised when a data channel is open.
- DataChannelClose: raised when a data channel is closed.

Registering to any of the above events requires the application to provide a callback function. Each event provides different information, so it is recommended to consult the signature of the event listeners.

Flow control and congestion management is one of the most important features of WebRTC. WebRTC connections start with the lowest configured bandwidth, and slowly ramp up to the maximum available bandwidth, or to the upper limit of the exploration range in case no bandwidth limitation is detected. Notice that WebRtcEndpoints in Kurento are designed in such a way that multiple WebRTC connections fed by the same stream share quality: when a new connection is added, as it must start with low bandwidth, it causes the rest of the connections to experience a transient period of degraded quality, until the new connection stabilizes its bitrate. This doesn't apply when transcoding is involved. Transcoders adjust their output bitrate based on bandwidth requirements, without affecting the original stream; if an incoming WebRTC stream needs to be transcoded, for whatever reason, all WebRtcEndpoints fed from the transcoder output share a quality separate from the ones connected directly to the original stream.

Note that the default VideoSendBandwidth range of the endpoint is a VERY conservative one, and leads to a low maximum video quality. Most applications will probably want to increase this to higher values, such as 2000 kbps (2 Mbps).

Check the extended documentation of these parameters in `SdpEndpoint`, `BaseRtpEndpoint`, and `RembParams`:

- Input bandwidth: values used to inform remote peers about the bitrate that can be sent to this endpoint.
  - MinVideoRecvBandwidth: minimum input bitrate, requested from WebRTC senders with REMB (default: 30 kbps).
  - MaxAudioRecvBandwidth and MaxVideoRecvBandwidth: maximum input bitrate, signaled in SDP Offers to WebRTC and RTP senders (default: unlimited).
- Output bandwidth: values used to control the bitrate of the video streams sent to remote peers. It is important to keep in mind that the pushed bitrate depends on network and remote peer capabilities. Remote peers can also announce bandwidth limitations in their SDPs (through the b={modifier}:{value} attribute). Kurento will always enforce bitrate limitations specified by the remote peer over internal configurations.
  - MinVideoSendBandwidth: REMB override of the minimum bitrate sent to WebRTC receivers (default: 100 kbps).
  - MaxVideoSendBandwidth: REMB override of the maximum bitrate sent to WebRTC receivers (default: 500 kbps).
  - RembParams.rembOnConnect: initial local REMB bandwidth estimation that gets propagated when a new endpoint is connected.

All bandwidth control parameters must be set before the SDP negotiation takes place, and can't be changed afterwards.

DataChannels allow other media elements that make use of the DataPad to send arbitrary data. For instance, if there is a filter that publishes event information, it will be sent to the remote peer through the channel. There is no API available for programmers to make use of this feature in the WebRtcEndpoint itself. DataChannels can be configured to provide:

- Reliable or partially reliable delivery of sent messages.
- In-order or out-of-order delivery of sent messages.

Unreliable, out-of-order delivery is equivalent to raw UDP semantics: a message may or may not arrive, and order is not important. However, a channel can be configured to be partially reliable, by specifying the maximum number of retransmissions or setting a time limit for retransmissions; the WebRTC stack will handle the acknowledgments and timeouts.

The possibility to create DataChannels in a WebRtcEndpoint must be explicitly enabled when creating the endpoint, as this feature is disabled by default. If enabled, data channels can be created by invoking the CreateDataChannel method. The arguments for this method, all of them optional, provide the necessary configuration:

- label: assigns a label to the DataChannel. This can help identify each channel separately.
- ordered: specifies whether the DataChannel guarantees order, which is the default mode. If maxPacketLifeTime and maxRetransmits have not been set, this enables reliable mode.
- maxPacketLifeTime: the time window, in milliseconds, during which transmissions and retransmissions may take place in unreliable mode. This forces unreliable mode, even if ordered has been activated.
- maxRetransmits: maximum number of retransmissions that are attempted in unreliable mode. This forces unreliable mode, even if ordered has been activated.
- protocol: name of the subprotocol used for data communication.
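A sketch of the offeree flow with Trickle ICE follows. Note the IceCandidateFound listener is bound before GatherCandidates is invoked; the event-payload shape, the handler signature, and the signaling helpers are assumptions:

    // WebRTC negotiation on the KMS side (offeree), with Trickle ICE.
    webRtc := &kurento.WebRtcEndpoint{}
    if err := pipeline.Create(webRtc, nil); err != nil {
        log.Fatal(err)
    }

    // 1. Bind the candidate listener BEFORE gathering, or candidates may be lost.
    webRtc.Subscribe("IceCandidateFound", func(ev map[string]interface{}) {
        sendCandidateToRemotePeer(ev["candidate"]) // hypothetical signaling helper
    })

    // 2. Process the browser's offer, and send the answer back via signaling.
    answer, err := webRtc.ProcessOffer(remoteOffer)
    if err != nil {
        log.Fatal(err)
    }
    sendToRemotePeer(answer) // hypothetical signaling helper

    // 3. Start the asynchronous candidate discovery.
    if err := webRtc.GatherCandidates(); err != nil {
        log.Fatal(err)
    }

    // 4. Feed remote candidates into the endpoint as they trickle in.
    for c := range remoteCandidates { // hypothetical channel of kurento.IceCandidate
        if err := webRtc.AddIceCandidate(c); err != nil {
            log.Println("addIceCandidate:", err)
        }
    }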
func (*WebRtcEndpoint) AddIceCandidate ¶
func (elem *WebRtcEndpoint) AddIceCandidate(candidate IceCandidate) error
Processes an ICE candidate sent by the remote peer of the connection.
func (*WebRtcEndpoint) CloseDataChannel ¶
func (elem *WebRtcEndpoint) CloseDataChannel(channelId int) error
Closes an open data channel.
func (*WebRtcEndpoint) CreateDataChannel ¶
func (elem *WebRtcEndpoint) CreateDataChannel(label string, ordered bool, maxPacketLifeTime int, maxRetransmits int, protocol string) error
Creates a new data channel, if data channels are supported.

Being supported means that the WebRtcEndpoint has been created with data channel support, the client also supports data channels, and they have been negotiated in the SDP exchange. Otherwise, this method throws an exception, indicating that the operation is not possible.

Data channels can work in either unreliable mode (analogous to the User Datagram Protocol, UDP) or reliable mode (analogous to the Transmission Control Protocol, TCP). The two modes have a simple distinction:

- Reliable mode guarantees the transmission of messages and also the order in which they are delivered. This takes extra overhead, thus potentially making this mode slower.
- Unreliable mode guarantees neither that every message will reach the other side, nor the order in which messages arrive. Removing this overhead allows this mode to work much faster.
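For instance, a partially reliable, unordered channel could be created as sketched here. Both the creation-time option key ("useDataChannels") as spelled in this package, and -1 as the "unset" sentinel for maxPacketLifeTime, are assumptions:

    // Assumption: "useDataChannels" is the creation option enabling the feature.
    webRtc := &kurento.WebRtcEndpoint{}
    if err := pipeline.Create(webRtc, map[string]interface{}{
        "useDataChannels": true,
    }); err != nil {
        log.Fatal(err)
    }

    // label="events", ordered=false, maxPacketLifeTime unset (-1, assumed),
    // maxRetransmits=3 (forces unreliable mode), default subprotocol.
    if err := webRtc.CreateDataChannel("events", false, -1, 3, ""); err != nil {
        log.Fatal(err)
    }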
func (*WebRtcEndpoint) GatherCandidates ¶
func (elem *WebRtcEndpoint) GatherCandidates() error
Starts the ICE candidate gathering.

This method triggers the asynchronous discovery of ICE candidates (as per the Trickle ICE mechanism), and returns immediately. Every newly trickled candidate is reported to the application by means of an IceCandidateFound event. Finally, when all candidates have been gathered, the IceGatheringDone event is emitted.

Normally, you would call this method as soon as possible after calling GenerateOffer or ProcessOffer, to quickly start discovering candidates and sending them to the remote peer.

You can also call this method before calling GenerateOffer or ProcessOffer. Doing so will include any already gathered candidates in the resulting SDP. You can leverage this behavior to implement fully traditional ICE (without Trickle): first call GatherCandidates, then only handle the SDP messages after the IceGatheringDone event has been received. This way, you make sure that all candidates have indeed been gathered, so the resulting SDP will include all of them (see the sketch below).
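The non-Trickle variant described in the last paragraph can be sketched as follows (the handler payload shape is an assumption, and the channel-based synchronization is only illustrative):

    // Traditional (non-Trickle) ICE: gather first, then generate a full SDP.
    done := make(chan struct{})
    webRtc.Subscribe("IceGatheringDone", func(ev map[string]interface{}) {
        close(done)
    })

    if err := webRtc.GatherCandidates(); err != nil {
        log.Fatal(err)
    }
    <-done // all local candidates have now been gathered

    // The offer embeds every gathered candidate.
    offer, err := webRtc.GenerateOffer(kurento.OfferOptions{})
    if err != nil {
        log.Fatal(err)
    }
    sendToRemotePeer(offer) // hypothetical signaling helper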
Notes ¶
Bugs ¶
A recursion happens while testing; I must find why.
Source Files
¶
- AlphaBlending.go
- Composite.go
- Dispatcher.go
- DispatcherOneToMany.go
- EndOfStream.go
- HttpEndpoint.go
- MediaProfileSpecType.go
- Mixer.go
- PlayerEndpoint.go
- RecorderEndpoint.go
- RtpEndpoint.go
- WebRtcEndpoint.go
- base.go
- complexTypes_AudioCaps.go
- complexTypes_AudioCodec.go
- complexTypes_CertificateKeyType.go
- complexTypes_CodecConfiguration.go
- complexTypes_ConnectionState.go
- complexTypes_CryptoSuite.go
- complexTypes_DSCPValue.go
- complexTypes_ElementConnectionData.go
- complexTypes_ElementStats.go
- complexTypes_EndpointStats.go
- complexTypes_FilterType.go
- complexTypes_Fraction.go
- complexTypes_GapsFixMethod.go
- complexTypes_GstreamerDotDetails.go
- complexTypes_IceCandidate.go
- complexTypes_IceCandidatePair.go
- complexTypes_IceComponentState.go
- complexTypes_IceConnection.go
- complexTypes_MediaFlowState.go
- complexTypes_MediaLatencyStat.go
- complexTypes_MediaProfileSpecType.go
- complexTypes_MediaState.go
- complexTypes_MediaTranscodingState.go
- complexTypes_MediaType.go
- complexTypes_ModuleInfo.go
- complexTypes_OfferOptions.go
- complexTypes_RTCCertificateStats.go
- complexTypes_RTCCodec.go
- complexTypes_RTCDataChannelState.go
- complexTypes_RTCDataChannelStats.go
- complexTypes_RTCIceCandidateAttributes.go
- complexTypes_RTCIceCandidatePairStats.go
- complexTypes_RTCInboundRTPStreamStats.go
- complexTypes_RTCMediaStreamStats.go
- complexTypes_RTCMediaStreamTrackStats.go
- complexTypes_RTCOutboundRTPStreamStats.go
- complexTypes_RTCPeerConnectionStats.go
- complexTypes_RTCRTPStreamStats.go
- complexTypes_RTCStats.go
- complexTypes_RTCStatsIceCandidatePairState.go
- complexTypes_RTCStatsIceCandidateType.go
- complexTypes_RTCTransportStats.go
- complexTypes_RembParams.go
- complexTypes_SDES.go
- complexTypes_ServerInfo.go
- complexTypes_ServerType.go
- complexTypes_Stats.go
- complexTypes_StatsType.go
- complexTypes_Tag.go
- complexTypes_UriEndpointState.go
- complexTypes_VideoCaps.go
- complexTypes_VideoCodec.go
- complexTypes_VideoInfo.go
- core.go
- websocket.go